<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMI</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Med Inform</journal-id>
      <journal-title>JMIR Medical Informatics</journal-title>
      <issn pub-type="epub">2291-9694</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v9i12e22798</article-id>
      <article-id pub-id-type="pmid">34860674</article-id>
      <article-id pub-id-type="doi">10.2196/22798</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Deep Learning–Assisted Burn Wound Diagnosis: Diagnostic Model Development Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Kukafka</surname>
            <given-names>Rita</given-names>
          </name>
        </contrib>
        <contrib contrib-type="editor">
          <name>
            <surname>Eysenbach</surname>
            <given-names>Gunther</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Ahmad</surname>
            <given-names>Kashif</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Shams</surname>
            <given-names>Shayan</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Chang</surname>
            <given-names>Che Wei</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-5864-017X</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Lai</surname>
            <given-names>Feipei</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Graduate Institute of Biomedical Electronics &#38; Bioinformatics</institution>
            <institution>National Taiwan University</institution>
            <addr-line>Room 419, Computer Science and Information Engineering-Der Tian Hall, No 1</addr-line>
            <addr-line>Roosevelt Road, Sec 4</addr-line>
            <addr-line>Taipei, 106319</addr-line>
            <country>Taiwan</country>
            <phone>886 2 3366 4888 ext 419</phone>
            <email>flai@ntu.edu.tw</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-7147-8122</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Christian</surname>
            <given-names>Mesakh</given-names>
          </name>
          <degrees>BSc</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-2342-4722</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Chen</surname>
            <given-names>Yu Chun</given-names>
          </name>
          <degrees>BSc</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-5163-2847</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Hsu</surname>
            <given-names>Ching</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-0560-1019</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Chen</surname>
            <given-names>Yo Shen</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-9537-2699</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author">
          <name name-style="western">
            <surname>Chang</surname>
            <given-names>Dun Hao</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-5578-7603</ext-link>
        </contrib>
        <contrib id="contrib8" contrib-type="author">
          <name name-style="western">
            <surname>Roan</surname>
            <given-names>Tyng Luen</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-3516-4721</ext-link>
        </contrib>
        <contrib id="contrib9" contrib-type="author">
          <name name-style="western">
            <surname>Yu</surname>
            <given-names>Yen Che</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-9854-3155</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Graduate Institute of Biomedical Electronics &#38; Bioinformatics</institution>
        <institution>National Taiwan University</institution>
        <addr-line>Taipei</addr-line>
        <country>Taiwan</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Division of Plastic and Reconstructive Surgery</institution>
        <institution>Department of Surgery</institution>
        <institution>Far Eastern Memorial Hospital</institution>
        <addr-line>New Taipei</addr-line>
        <country>Taiwan</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Department of Computer Science &#38; Information Engineering</institution>
        <institution>National Taiwan University</institution>
        <addr-line>Taipei</addr-line>
        <country>Taiwan</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Department of Information Management</institution>
        <institution>Yuan Ze University</institution>
        <addr-line>Chung-Li</addr-line>
        <country>Taiwan</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Feipei Lai <email>flai@ntu.edu.tw</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>12</month>
        <year>2021</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>2</day>
        <month>12</month>
        <year>2021</year>
      </pub-date>
      <volume>9</volume>
      <issue>12</issue>
      <elocation-id>e22798</elocation-id>
      <history>
        <date date-type="received">
          <day>12</day>
          <month>8</month>
          <year>2020</year>
        </date>
        <date date-type="rev-request">
          <day>6</day>
          <month>12</month>
          <year>2020</year>
        </date>
        <date date-type="rev-recd">
          <day>19</day>
          <month>12</month>
          <year>2020</year>
        </date>
        <date date-type="accepted">
          <day>15</day>
          <month>10</month>
          <year>2021</year>
        </date>
      </history>
      <copyright-statement>©Che Wei Chang, Feipei Lai, Mesakh Christian, Yu Chun Chen, Ching Hsu, Yo Shen Chen, Dun Hao Chang, Tyng Luen Roan, Yen Che Yu. Originally published in JMIR Medical Informatics (https://medinform.jmir.org), 02.12.2021.</copyright-statement>
      <copyright-year>2021</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Informatics, is properly cited. The complete bibliographic information, a link to the original publication on https://medinform.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://medinform.jmir.org/2021/12/e22798" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Accurate assessment of the percentage total body surface area (%TBSA) of burn wounds is crucial in the management of burn patients. The resuscitation fluid and nutritional needs of burn patients, their need for intensive care unit admission, and probability of mortality are all directly related to %TBSA. It is difficult to estimate a burn area of irregular shape by inspection. Many articles have reported discrepancies in estimating %TBSA by different doctors.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>We propose a method, based on deep learning, for burn wound detection, segmentation, and calculation of %TBSA on a pixel-to-pixel basis.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>A 2-step procedure was used to convert burn wound diagnosis into %TBSA. In the first step, images of burn wounds were collected from medical records and labeled by burn surgeons, and the data set was then input into 2 deep learning architectures, U-Net and Mask R-CNN, each configured with 2 different backbones, to segment the burn wounds. In the second step, we collected and labeled images of hands to create another data set, which was also input into U-Net and Mask R-CNN to segment the hands. The %TBSA of burn wounds was then calculated by comparing the pixels of mask areas on images of the burn wound and hand of the same patient according to the rule of hand, which states that one’s hand accounts for 0.8% of TBSA.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>A total of 2591 images of burn wounds were collected and labeled to form the burn wound data set. The data set was randomly split into training, validation, and testing sets in a ratio of 8:1:1. Four hundred images of volar hands were collected and labeled to form the hand data set, which was also split into 3 sets using the same method. For the images of burn wounds, Mask R-CNN with ResNet101 had the best segmentation result with a Dice coefficient (DC) of 0.9496, while U-Net with ResNet101 had a DC of 0.8545. For the hand images, U-Net and Mask R-CNN had similar performance with DC values of 0.9920 and 0.9910, respectively. Lastly, we conducted a test diagnosis in a burn patient. Mask R-CNN with ResNet101 had on average less deviation (0.115% TBSA) from the ground truth than burn surgeons.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>This is one of the first studies to diagnose all depths of burn wounds and convert the segmentation results into %TBSA using different deep learning models. We aimed to assist medical staff in estimating burn size more accurately, thereby helping to provide precise care to burn victims.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>deep learning</kwd>
        <kwd>semantic segmentation</kwd>
        <kwd>instance segmentation</kwd>
        <kwd>burn wounds</kwd>
        <kwd>percentage total body surface area</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>According to the World Health Organization, an estimated 265,000 deaths occur each year from burn injuries. In the United States, burn injuries result in 10 million visits to the emergency department and 40,000 patients requiring hospitalization annually. The most critical aspect of managing burn injuries is the accurate calculation of the burn area, expressed as percentage total body surface area (%TBSA). However, many articles have reported discrepancies in the %TBSA diagnosed by different doctors. In adult burn injuries, Harish et al reported that overestimation by the referring institution occurred in 53% of cases and that the difference was statistically significant [<xref ref-type="bibr" rid="ref1">1</xref>]. In child burn injuries from a national survey, Baartmans et al reported that burn size was often overestimated by referrers, by up to 30% TBSA, while underestimation was up to 13% TBSA [<xref ref-type="bibr" rid="ref2">2</xref>].</p>
        <p>There are 2 types of inaccurate estimations of burn injuries: misdiagnosis of burn depth and miscalculation of burn area. Misdiagnosis of burn depth comes from the dynamic nature of wound change. The initial presentation of burn depth may be quite different from the presentation several days after injury. Hence, the reported accuracy of diagnosis of burn depth is only 64% to 76% among experienced burn surgeons [<xref ref-type="bibr" rid="ref3">3</xref>]. When evaluations are performed by less experienced practitioners, the accuracy declines to 50%. Fortunately, many technologies have been developed for accurate diagnosis of burn depth, such as laser Doppler imaging (LDI), infrared thermography, and photoacoustic imaging [<xref ref-type="bibr" rid="ref4">4</xref>-<xref ref-type="bibr" rid="ref7">7</xref>]. For example, LDI, which is based on perfusion in the burn area, provides information that is highly correlated with burn wound healing potential. Healing potential is a practical indicator of burn depth.</p>
        <p>Though the assessment of burn depth with such technologies is often satisfactory, miscalculation of burn area may be hard to avoid. Such miscalculation often occurs when an area of irregular shape is estimated by comparing it with another area of irregular shape, for example, estimating the %TBSA of an irregularly shaped burn area on the upper extremity of an adult using the estimation that the upper extremity has roughly 7% to 9% TBSA as a guide [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. In an interesting study, Parvizi et al reported that even when participants reached consensus on the margin of the burn wound, their estimations of %TBSA were still different [<xref ref-type="bibr" rid="ref10">10</xref>]. The difference in %TBSA resulted in discrepancies in estimating the amount of resuscitation fluid needed by as much as 5280 mL using the Parkland formula. Clearly, there is an unmet need to improve the accuracy of burn diagnosis.</p>
        <p>Machine learning has many applications in the field of medicine, such as in drug development and disease diagnosis [<xref ref-type="bibr" rid="ref11">11</xref>-<xref ref-type="bibr" rid="ref14">14</xref>]. Although machine learning has also been implemented in many aspects of surgery, its application in burn care is relatively rare [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>]. Burn care is a field where human error can be reduced by computer assistance.</p>
      </sec>
      <sec>
        <title>Prior Work</title>
        <p>Early work in the use of machine learning to assist burn diagnosis focused on classification of burn depth (<xref ref-type="table" rid="table1">Table 1</xref>). Since burn injuries result in a mixture of different burn depths, most images of burn wounds cannot be simply classified as superficial partial burn, deep partial burn, or full thickness burn. Before images of burn wounds are input for feature extraction, the images need to be processed.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Segmentation of burn wounds.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="180"/>
            <col width="190"/>
            <col width="220"/>
            <col width="210"/>
            <col width="200"/>
            <thead>
              <tr valign="top">
                <td>Study</td>
                <td>Image database</td>
                <td>Model</td>
                <td>Performance metric</td>
                <td>Objective</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Serrano et al [<xref ref-type="bibr" rid="ref17">17</xref>]</td>
                <td>38 images</td>
                <td>Fuzzy-ARTMAP</td>
                <td>Accuracy 88.57%</td>
                <td>Burn depth</td>
              </tr>
              <tr valign="top">
                <td>Acha et al [<xref ref-type="bibr" rid="ref18">18</xref>]</td>
                <td>50 images</td>
                <td>Fuzzy-ARTMAP</td>
                <td>Accuracy 82.26%</td>
                <td>Burn depth</td>
              </tr>
              <tr valign="top">
                <td>Acha et al [<xref ref-type="bibr" rid="ref19">19</xref>]</td>
                <td>50 images</td>
                <td>SVM<sup>a</sup>, Fuzzy-ARTMAP</td>
                <td>Error rate 0.7%</td>
                <td>Burn depth</td>
              </tr>
              <tr valign="top">
                <td>Acha et al [<xref ref-type="bibr" rid="ref20">20</xref>]</td>
                <td>74 images</td>
                <td>KNN<sup>b</sup>, MDS<sup>c</sup></td>
                <td>Accuracy 83.8%</td>
                <td>Need for skin grafts</td>
              </tr>
              <tr valign="top">
                <td>Serrano et al [<xref ref-type="bibr" rid="ref21">21</xref>]</td>
                <td>94 images</td>
                <td>SVM, MDS</td>
                <td>Accuracy 79.73%</td>
                <td>Need for skin grafts</td>
              </tr>
              <tr valign="top">
                <td>Cirillo et al [<xref ref-type="bibr" rid="ref22">22</xref>]</td>
                <td>23 images</td>
                <td>VGG16, GoogleNet, ResNet50, ResNet101</td>
                <td>Accuracy 90.54%</td>
                <td>Burn depth</td>
              </tr>
              <tr valign="top">
                <td>Despo et al [<xref ref-type="bibr" rid="ref23">23</xref>]</td>
                <td>749 images</td>
                <td>AlexNet, VGG16, GoogleNet</td>
                <td>Accuracy 85%</td>
                <td>Burn area segmentation, burn depth</td>
              </tr>
              <tr valign="top">
                <td>Jiao et al [<xref ref-type="bibr" rid="ref24">24</xref>]</td>
                <td>1000 images</td>
                <td>Mask R-CNN</td>
                <td>DC<sup>d</sup> 84.51%</td>
                <td>Burn area segmentation</td>
              </tr>
              <tr valign="top">
                <td>Our study</td>
                <td>2591 images</td>
                <td>Mask R-CNN, U-Net</td>
                <td>DC 94%</td>
                <td>Estimation of burn %TBSA<sup>e</sup></td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>SVM: support vector machine.</p>
            </fn>
            <fn id="table1fn2">
              <p><sup>b</sup>KNN: K-nearest neighbor.</p>
            </fn>
            <fn id="table1fn3">
              <p><sup>c</sup>MDS: multidimensional scaling.</p>
            </fn>
            <fn id="table1fn4">
              <p><sup>d</sup>DC: Dice coefficient.</p>
            </fn>
            <fn id="table1fn5">
              <p><sup>e</sup>%TBSA: percentage total body surface area.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <sec>
          <title>Small Regions of Images</title>
          <p>The most common method of addressing different burn depths in a given image is to select small regions of the image, called boxes, for processing. These small boxes are then transformed into a red/green/blue (RGB) matrix in a color coordinate system. The relative distance of each of the pixels from the others is then calculated and a threshold is set to check whether the box is homogeneous in texture and color. Homogeneous boxes are classified into different burn depths and input for machine learning.</p>
          <p>Acha and Serrano collected 62 images of burn wounds with a resolution of 1536×1024 pixels. They selected regions of only 49×49 pixels from the images and classified these small boxes into 5 appearances to yield 250 images. They input the data set into Fuzzy-ARTMAP for training. A neural network was then used to classify burns into the 3 aforementioned types of burn depths with a success rate of 82% to 88% [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>]. Later, they reduced the error rate from 1.6% to 0.7% by applying 5-fold cross-validation to the data sets and used support vector machine (SVM) to perform the classification [<xref ref-type="bibr" rid="ref19">19</xref>]. In 2 subsequent studies, they further applied multidimensional scaling combining SVM and k-nearest neighbor classification to predict the need for a skin graft, with success rates of 79.73% and 83.8%, respectively [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>].</p>
        </sec>
        <sec>
          <title>Continuous Monitoring</title>
          <p>Another method used to get the burn depths of a region corresponding to any specified pixels of the images of a burn wound is to record the wound from the time of injury to complete healing with the same protocol. Cirillo et al continuously collected images from the same burn wound until it healed [<xref ref-type="bibr" rid="ref22">22</xref>]. They were then able to draw lines on the image corresponding to healing time and divide the area into 4 types of burn depths. To be more precise, they used the method mentioned above to extract small regions of the images (676 regions of 224×224 pixels from 23 images of 3456×2304 pixels). They then input these square regions of interest (RoIs) into several pretrained convolutional neural network (CNN) models, such as VGG19, ResNet18, ResNet50, and ResNet101. ResNet101 showed the best classification results with an average accuracy of 0.8166.</p>
        </sec>
      </sec>
      <sec>
        <title>Goal of This Study</title>
        <p>The use of machine learning in burn diagnosis to classify burn depth is currently quite limited. Technologies, such as LDI and thermography, are readily available and far more commonly employed. The treatment of burn injury may last for days or months. Without the use of special technologies, burn depth can still be determined by clinical assessment during the course of treatment. Recently, CNNs have been used in burn diagnosis to segment burn wounds. Despo et al reported a mean intersection over union (IoU) of around 0.7 with a fully convolutional network (FCN) [<xref ref-type="bibr" rid="ref23">23</xref>]. Jiao et al reported a mean Dice coefficient (DC) of 0.85 with Mask R-CNN [<xref ref-type="bibr" rid="ref24">24</xref>]. Such segmentation results could further be used to calculate %TBSA. This is important because all formulae for emergent fluid resuscitation (eg, the Parkland formula = %TBSA × body weight × 4) and calorie needs (eg, the Curreri formula = 25 × body weight + 40 × %TBSA) are based on %TBSA.</p>
        <p>In this study, we implemented deep learning models to segment burn wounds and perform conversion to %TBSA based on the number of pixels. We tried to decrease the human error of estimating an area of irregular shape by inspection. We aimed to help medical staff obtain accurate formulae to aid in making decisions about triage, acute management, and transfer of burn patients.</p>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Image Acquisition</title>
        <p>This study was approved by the research ethics review committee of Far Eastern Memorial Hospital (number 109037-F). We reviewed the medical records of patients in Far Eastern Memorial Hospital from January 2016 to December 2019 with ICD-9 codes 940-948, 983, and 994. We collected the images of burn wounds from their medical records and saved them as JPG files. These images were assigned random numbers for deidentification and were randomly presented to 2 out of 5 burn surgeons for labeling.</p>
      </sec>
      <sec>
        <title>Labeling and Processing</title>
        <p>Since many burn wounds have a mixture of different burn depths, the images were roughly classified into the following 3 categories: superficial/superficial partial burn, deep partial burn, and full thickness burn. Clinically, the color of superficial/superficial partial burns is red or pink, and the color of deep partial burns is dark pink to blotchy red. Blistering is common in superficial partial burns and is also present in deep partial burns of a relatively large size. Full thickness burns are white, waxy, or charred without blisters. All images were co-labeled by 2 burn surgeons to yield a single consensus result. The margins of the burn wounds were labeled without regard to burn depth with the labeling tool <italic>LabelMe</italic> and saved as JSON files. A burn wound image was excluded if the wound was on the face; it involved tattooed skin; it was coated with burn ointment; it appeared to have undergone an intervention, such as debridement or skin graft; or no agreement was reached on the margin of the burn wound by the 2 burn surgeons.</p>
        <p>Since the images of burn wounds were collected from various medical records, their sizes were not uniform, including 4000×3000, 2736×1824, and 2592×1944 pixels. All labeled images were resized to 512×512 pixels. The data set of burn wounds was randomly split in a ratio of 8:1:1 into 3 sets for training, validation, and testing. We applied 2 deep learning architectures, U-Net and Mask R-CNN, in combination with 2 different backbones, ResNet50 and ResNet101, to segment these images.</p>
      </sec>
      <sec>
        <title>Evaluation Metrics</title>
        <p>The DC and IoU are 2 common metrics used to assess segmentation performance, whereas precision, recall, and accuracy are common metrics for assessing classification performance. The DC is twice the area of the intersection of the ground truth and prediction divided by the sum of their areas. It is given as follows:</p>
        <p>
          <disp-formula>
            <graphic xlink:href="medinform_v9i12e22798_fig9.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </disp-formula>
        </p>
        <p>where TP (true positive) denotes the number of correctly classified burn pixels, FP (false positive) denotes the number of mistakenly classified burn pixels, and FN (false negative) denotes the number of mistakenly classified nonburn pixels.</p>
        <p>The IoU denotes the area of the intersection of the ground truth and prediction divided by the area of their union. It is given as follows:</p>
        <p>
          <disp-formula>
            <graphic xlink:href="medinform_v9i12e22798_fig10.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </disp-formula>
        </p>
        <p>Precision is defined as the ratio of correctly classified burn pixels to all pixels predicted as burn. It is also called positive predictive value and is given as follows:</p>
        <p>
          <disp-formula>Precision = TP / (TP + FP) <bold>(3)</bold></disp-formula>
        </p>
        <p>Recall is defined as the ratio of correctly classified burn pixels to all actual burn pixels. It is also called sensitivity and is given as follows:</p>
        <p>
          <disp-formula>Recall = TP / (TP + FN) <bold>(4)</bold></disp-formula>
        </p>
        <p>Accuracy denotes the percentage of correctly classified pixels. It is given as follows:</p>
        <p>
          <disp-formula>Accuracy = (TP + TN) / (TP + FP + TN + FN) <bold>(5)</bold></disp-formula>
        </p>
        <p>where TN (true negative) denotes the number of correctly classified nonburn pixels.</p>
      </sec>
      <sec>
        <title>Semantic Segmentation: U-Net</title>
        <p>The convolutions in the U-Net path can be replaced with a deep network framework, such as the ResNet framework, which can explore and learn more features from the data (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). Then, the networks can be initialized using pretrained model weights derived from large-scale object detection, segmentation, and captioning data sets such as ImageNet and COCO. In our case, we trained our model using 2 different backbones, ResNet101 and ResNet50, with weights from the pretrained ImageNet model (<xref ref-type="table" rid="table2">Table 2</xref>). The standard augmentations of images we used were rotations, shifts, scale, gaussian blur, and contrast normalization. The standard Dice loss was chosen as the loss function. The formula is as follows:</p>
        <p>
          <disp-formula>
            <graphic xlink:href="medinform_v9i12e22798_fig11.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </disp-formula>
        </p>
        <p>The <italic>ϵ</italic> term is used to avoid the issue of dividing by 0 when both the ground truth and the predicted masks are empty.</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Configuration of the models.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="380"/>
            <col width="400"/>
            <col width="220"/>
            <thead>
              <tr valign="top">
                <td>Variable</td>
                <td>Mask R-CNN</td>
                <td>U-Net</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Number of classes</td>
                <td>1</td>
                <td>1</td>
              </tr>
              <tr valign="top">
                <td>Backbone</td>
                <td>ResNet101 &#38; ResNet50</td>
                <td>ResNet101 &#38; ResNet50</td>
              </tr>
              <tr valign="top">
                <td>Regional proposal network anchor scales</td>
                <td>8, 16, 32, 64, 128</td>
                <td>N/A<sup>a</sup></td>
              </tr>
              <tr valign="top">
                <td>Train RoIs<sup>b</sup> per image, n</td>
                <td>128</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>Anchors per image, n</td>
                <td>256</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>Learning rate</td>
                <td>0.0001 (initial rate, change in different epochs)</td>
                <td>0.001</td>
              </tr>
              <tr valign="top">
                <td>Learning momentum</td>
                <td>0.9</td>
                <td>0.9</td>
              </tr>
              <tr valign="top">
                <td>Weight decay</td>
                <td>0.0001</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>Batch size</td>
                <td>8</td>
                <td>8</td>
              </tr>
              <tr valign="top">
                <td>Image dimensions</td>
                <td>512×512</td>
                <td>512×512</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table2fn1">
              <p><sup>a</sup>N/A: not applicable.</p>
            </fn>
            <fn id="table2fn2">
              <p><sup>b</sup>RoI: region of interest.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Instance Segmentation: Mask R-CNN</title>
        <p>In our implementation of Mask R-CNN, we trained our model using ResNet101 and ResNet50 with weights from the pretrained COCO model (<xref ref-type="table" rid="table2">Table 2</xref>). Mask R-CNN uses a multitask loss function given by L = L<sub>class</sub> + L<sub>box</sub> + L<sub>mask</sub> (<xref rid="figure1" ref-type="fig">Figure 1</xref>). The L<sub>class</sub> component contains the regional proposal network (RPN) class loss (failure of the RPN to separate object prediction from background) added to the Mask R-CNN class loss (failure of Mask R-CNN object classification). The L<sub>box</sub> component contains the RPN bounding box loss (failure of object localization or bounding by the RPN) added to the Mask R-CNN bounding box loss (failure of object localization or bounding by Mask R-CNN). The last component L<sub>mask</sub> loss constitutes the failure of Mask R-CNN object mask segmentation.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Mask R-CNN architecture with ResNet101. FPN: feature pyramid network; RoI: region of interest; RPN: regional proposal network.</p>
          </caption>
          <graphic xlink:href="medinform_v9i12e22798_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Burn Segmentation to %TBSA</title>
        <p>When the burn wounds are correctly segmented, the final step is to convert the pixels to %TBSA. To solve this problem, we applied the rule of hand/palm. The original rule is that a person’s hand with digits accounts for 1% TBSA. It is the most common method of estimating burn %TBSA [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref26">26</xref>]. Recent studies have shown that a hand without digits represents precisely 0.5% TBSA (the rule of palm) and a hand with digits should be adjusted to around 0.8% TBSA (the rule of hand) [<xref ref-type="bibr" rid="ref8">8</xref>]. If we use deep learning models to segment a patient’s burn wounds as well as hands, we can then convert the segmentation result of burn wounds into %TBSA.</p>
        <p>To produce the data set of hands and the data set of palms, we collected images of both volar hands from our colleagues. For each image, we labeled the hand with digits and without digits corresponding to the rule of hand and the rule of palm, respectively. These 2 data sets were split in a ratio of 8:1:1 into training, validation, and testing sets as well. The hand data set and the palm data set were processed according to the previous methods for burn wounds. The %TBSA of a burn wound can be calculated by comparing the mask area of the burn wound with the mask area of the hand or palm of the same patient. The formula is given by:</p>
        <p>
          <disp-formula>
            <graphic xlink:href="medinform_v9i12e22798_fig12.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </disp-formula>
        </p>
        <p>where M<sub>burn</sub> is the number of pixels of the masked burn area, M<sub>hand</sub> is the number of pixels of the masked hand area (0.8% TBSA), M<sub>palm</sub> is the number of pixels of the masked palm area (0.5% TBSA), D<sub>burn</sub> is the filming distance of the image of the patient’s burn wound, and D<sub>hand</sub> is the filming distance of the image of the patient’s hand.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Segmentation of Burn Wounds</title>
        <p>There were 3 data sets used in our study, 1 each for burn wounds, hands, and palms. For the burn wound data, we collected 3571 images from the medical records of Far Eastern Hospital, 980 of which were excluded (mostly because the burn wounds had undergone interventions, and some because they were coated with burn ointment). The 2591 selected images were labeled and included in the burn wound data set. Among these images, 2073 were used as the training set and 259 were used as the validation set. The remaining 259 images were preserved as the testing set.</p>
        <p>In our study, there was only 1 class in the ground truth. From the definitions of the DC and IoU, the two metrics satisfy the relation 1/2 × DC ≤ IoU ≤ DC and are perfectly positively correlated. We used DC as our main metric to evaluate segmentation performance because it penalizes false negatives more than IoU does, and it is better to overestimate burn size than underestimate it.</p>
        <p>Both U-Net and Mask R-CNN had better segmentation performance with the ResNet101 backbone than with ResNet50 (<xref ref-type="table" rid="table3">Table 3</xref> and <xref ref-type="table" rid="table4">Table 4</xref>). The improvement was obvious in U-Net (DC: 0.8545 vs 0.8077) but negligible in Mask R-CNN (DC: 0.9496 vs 0.9493). Under the same backbone, Mask R-CNN had better performance in burn wound segmentation and classification than U-Net. Mask R-CNN with ResNet101 had the best segmentation result with a DC of 0.9496.</p>
        <p><xref rid="figure2" ref-type="fig">Figures 2</xref>-<xref rid="figure4" ref-type="fig">4</xref> illustrate the performance of the 2 models in segmenting different burn depths. Both Mask R-CNN and U-Net showed poor segmentation results when they encountered small scattered burns (<xref rid="figure5" ref-type="fig">Figure 5</xref>).</p>
        <table-wrap position="float" id="table3">
          <label>Table 3</label>
          <caption>
            <p>Segmentation results of burn wounds with ResNet101.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="460"/>
            <col width="370"/>
            <col width="170"/>
            <thead>
              <tr valign="top">
                <td>Variable</td>
                <td>U-Net</td>
                <td>Mask R-CNN</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Mean DC<sup>a</sup></td>
                <td>0.8545</td>
                <td>0.9496</td>
              </tr>
              <tr valign="top">
                <td>Mean IoU<sup>b</sup></td>
                <td>0.7782</td>
                <td>0.9089</td>
              </tr>
              <tr valign="top">
                <td>Mean precision</td>
                <td>0.9041</td>
                <td>0.9613</td>
              </tr>
              <tr valign="top">
                <td>Mean recall</td>
                <td>0.8541</td>
                <td>0.9390</td>
              </tr>
              <tr valign="top">
                <td>Mean accuracy</td>
                <td>0.7893</td>
                <td>0.9130</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table3fn1">
              <p><sup>a</sup>DC: Dice coefficient.</p>
            </fn>
            <fn id="table3fn2">
              <p><sup>b</sup>IoU: intersection over union.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <table-wrap position="float" id="table4">
          <label>Table 4</label>
          <caption>
            <p>Segmentation results of burn wounds with ResNet50.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="460"/>
            <col width="370"/>
            <col width="170"/>
            <thead>
              <tr valign="top">
                <td>Variable</td>
                <td>U-Net</td>
                <td>Mask R-CNN</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Mean DC<sup>a</sup></td>
                <td>0.8077</td>
                <td>0.9493</td>
              </tr>
              <tr valign="top">
                <td>Mean IoU<sup>b</sup></td>
                <td>0.7190</td>
                <td>0.9075</td>
              </tr>
              <tr valign="top">
                <td>Mean precision</td>
                <td>0.8947</td>
                <td>0.9610</td>
              </tr>
              <tr valign="top">
                <td>Mean recall</td>
                <td>0.8002</td>
                <td>0.9382</td>
              </tr>
              <tr valign="top">
                <td>Mean accuracy</td>
                <td>0.7331</td>
                <td>0.9117</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table4fn1">
              <p><sup>a</sup>DC: Dice coefficient.</p>
            </fn>
            <fn id="table4fn2">
              <p><sup>b</sup>IoU: intersection over union.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Superficial partial burn. A: original photo; B: ground truth; C: result of Mask R-CNN; D: result of U-Net.</p>
          </caption>
          <graphic xlink:href="medinform_v9i12e22798_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Deep partial burn. A: original photo; B: ground truth; C: result of Mask R-CNN; D: result of U-Net.</p>
          </caption>
          <graphic xlink:href="medinform_v9i12e22798_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>Full thickness burn. A: original photo; B: ground truth; C: result of Mask R-CNN; D: result of U-Net.</p>
          </caption>
          <graphic xlink:href="medinform_v9i12e22798_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>Small scattered burns. A: original photo; B: ground truth; C: result of Mask R-CNN; D: result of U-Net.</p>
          </caption>
          <graphic xlink:href="medinform_v9i12e22798_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Segmentation of Hands and Palms</title>
        <p>A total of 400 images of both volar hands were collected and labeled. The male-to-female ratio was 193:207. Since U-Net and Mask R-CNN both performed better with the ResNet101 backbone than with the ResNet50 backbone in the burn wound segmentation, only ResNet101 was applied in the segmentation of the hand and palm data sets.</p>
        <p>Contrary to the burn wound results, U-Net had slightly better overall performance in the segmentation of the hands and palms than Mask R-CNN (<xref ref-type="table" rid="table5">Table 5</xref> and <xref ref-type="table" rid="table6">Table 6</xref>). For hand segmentation, U-Net had a DC of 0.9920 and Mask R-CNN had a DC of 0.9692. For palm segmentation, the difference was not as obvious with a DC of 0.9910 versus 0.9803. <xref rid="figure6" ref-type="fig">Figure 6</xref> provides a representative example of the segmentation of a particular hand by both U-Net and Mask R-CNN, while <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref> provides an example for a palm.</p>
        <table-wrap position="float" id="table5">
          <label>Table 5</label>
          <caption>
            <p>Segmentation results for hands with ResNet101.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="460"/>
            <col width="370"/>
            <col width="170"/>
            <thead>
              <tr valign="top">
                <td>Variable</td>
                <td>U-Net</td>
                <td>Mask R-CNN</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Mean DC<sup>a</sup></td>
                <td>0.9920</td>
                <td>0.9692</td>
              </tr>
              <tr valign="top">
                <td>Mean IoU<sup>b</sup></td>
                <td>0.9842</td>
                <td>0.9405</td>
              </tr>
              <tr valign="top">
                <td>Mean precision</td>
                <td>0.9906</td>
                <td>0.9657</td>
              </tr>
              <tr valign="top">
                <td>Mean recall</td>
                <td>0.9935</td>
                <td>0.9728</td>
              </tr>
              <tr valign="top">
                <td>Mean accuracy</td>
                <td>0.9933</td>
                <td>0.9407</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table5fn1">
              <p><sup>a</sup>DC: Dice coefficient.</p>
            </fn>
            <fn id="table5fn2">
              <p><sup>b</sup>IoU: intersection over union.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <table-wrap position="float" id="table6">
          <label>Table 6</label>
          <caption>
            <p>Segmentation results for palms with ResNet101.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="460"/>
            <col width="370"/>
            <col width="170"/>
            <thead>
              <tr valign="top">
                <td>Variable</td>
                <td>U-Net</td>
                <td>Mask R-CNN</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Mean DC<sup>a</sup></td>
                <td>0.9910</td>
                <td>0.9803</td>
              </tr>
              <tr valign="top">
                <td>Mean IoU<sup>b</sup></td>
                <td>0.9822</td>
                <td>0.9614</td>
              </tr>
              <tr valign="top">
                <td>Mean precision</td>
                <td>0.9904</td>
                <td>0.9836</td>
              </tr>
              <tr valign="top">
                <td>Mean recall</td>
                <td>0.9916</td>
                <td>0.9770</td>
              </tr>
              <tr valign="top">
                <td>Mean accuracy</td>
                <td>0.9878</td>
                <td>0.9615</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table6fn1">
              <p><sup>a</sup>DC: Dice coefficient.</p>
            </fn>
            <fn id="table6fn2">
              <p><sup>b</sup>IoU: intersection over union.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <fig id="figure6" position="float">
          <label>Figure 6</label>
          <caption>
            <p>Segmentation of the hand. A: original photo; B: ground truth; C: result of Mask R-CNN; D: result of U-Net.</p>
          </caption>
          <graphic xlink:href="medinform_v9i12e22798_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Burn Segmentation to %TBSA</title>
        <p>In the last part of our study, we designed a test to compare the estimation of the percentage of TBSA burned according to surgeons and Mask R-CNN. Photos of the abdomen, left thigh, left leg, right leg, and left hand of a patient were taken from the same distance (<xref rid="figure7" ref-type="fig">Figure 7</xref>). Images of the burn wounds and of the hands were co-labeled by 2 surgeons as ground truth. The previously trained Mask R-CNN with the ResNet101 backbone was used to calculate the %TBSA of each wound. Then, pictures of the burn wounds and the hands were given to 5 burn surgeons, and they gave their respective estimations of %TBSA. The results of each surgeon, ground truth, and Mask R-CNN are shown in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>. The ground truth was a pixel-based calculation (abdomen: 2.07%, thigh: 2.06%, right leg and knee: 2.64%, and left leg: 2.85%). Mask R-CNN had a smaller average deviation (0.115% TBSA) from ground truth than all of the burn surgeons (0.45%-1.14% TBSA; <xref rid="figure8" ref-type="fig">Figure 8</xref>).</p>
        <fig id="figure7" position="float">
          <label>Figure 7</label>
          <caption>
            <p>A1-A5: original image of the left hand, abdomen, left thigh, right leg, and left leg. B1-B5: labeled images as ground truth.</p>
          </caption>
          <graphic xlink:href="medinform_v9i12e22798_fig7.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure8" position="float">
          <label>Figure 8</label>
          <caption>
            <p>Differences between ground truth and estimated %TBSA of Mask R-CNN and burn surgeons at various burn sites. %TBSA: percentage total body surface area.</p>
          </caption>
          <graphic xlink:href="medinform_v9i12e22798_fig8.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Data Sets</title>
        <p>Studies of machine learning in burn diagnosis are relatively rare, because there are challenges in establishing accurate data sets. To begin with, unlike medical images from X-ray or computed tomography (CT) scans, images of burn wounds are not acquired under a standard protocol. Images of burn wounds are acquired using different equipment under various circumstances, such as illumination conditions, distance to the patient, and the background scene. These factors make it difficult to achieve a uniform standard of labeling and annotation.</p>
        <p>Next, the numbers of burn images compared with other open image data sets, such as MNIST (70,000 images) and CIFAR-10 (60,000 images), are limited. In recent studies of burn wound segmentation, Despo et al used 656 images for training [<xref ref-type="bibr" rid="ref23">23</xref>] and Jiao used 1000 images for training [<xref ref-type="bibr" rid="ref24">24</xref>]. We used 2332 labeled images from all burn depths for training and 259 images for testing. Images of burn wounds are difficult to collect. Unlike cancer imaging archives, there are no high-quality open data sets of images of burn wounds. This may be because complete deidentification of these images is not possible. Researchers are asked not to publish these images as open data sets due to patient privacy. Researchers from different medical facilities are not permitted to share the images with each other as well. Under these circumstances, federated learning to form a global model may be a feasible method to improve the accuracy of different individual models. The concept of federated learning is to share only the weights and bias of different models without sharing data sets [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>].</p>
        <p>In addition, burn wounds, unlike tumors that are detected on magnetic resonance imaging (MRI) images, are not commonly sampled for biopsy to confirm diagnosis. For any pixels on the images, if no other diagnostic technology is used, the true burn depths are hard to ascertain. The images, even when labeled by burn specialists, are relative ground truth only. A given image may receive many different labels when assessed by many doctors.</p>
        <p>Finally, many burn wounds have a mixture of several burn depths. If the object of deep learning is to build a burn depth classifier, most images cannot be included for training. Images of burn wounds require preprocessing as discussed previously in the methods.</p>
        <p>In the early work of our study, we tried to build a burn depth classifier. We divided the images of burn wounds into the following 4 categories based on burn depth: superficial (112 images), superficial partial (201 images), deep partial (165 images), and full thickness (170 images). We imported the data set into IBM Visual Insights (previously PowerAI Vision), a tool that can train models to do the classification task. We did data augmentation to enlarge the data set and improve generalization. Then, we chose pretrained GoogLeNet as our network structure. This model showed decent results, with a mean accuracy of 93% (<xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>). However, some images in the category “superficial partial” had regions with other burn depths as well. The confusion matrix showed more false negative results in this group than in the others (<xref ref-type="supplementary-material" rid="app5">Multimedia Appendix 5</xref>). Hence, the accuracy of the model as a burn depth classifier largely depended on the burn wound images collected.</p>
        <p>The abovementioned confounding factors also had an impact in previous studies of machine learning used to segment images of burn wounds. In the study by Despo et al, the margins of burn wounds on images were labeled by a surgeon. Then, every image was annotated to 1 severity of burn depth. Since the burn wound depths were not homogeneous, accuracy and IoU were greater in partial thickness burns [<xref ref-type="bibr" rid="ref23">23</xref>]. In our study, we also faced the same challenges. Initially, every image was labeled by 2 burn surgeons to obtain 2 labeled images. When the burn wounds had multiple burn depths, the labeled areas of the 2 surgeons had more discrepancy. When we input the discrepantly labeled images to train the models, they resulted in a good mask of the overall burn area but an incorrect classification of burn depth segmentation (<xref ref-type="supplementary-material" rid="app6">Multimedia Appendix 6</xref> and <xref ref-type="supplementary-material" rid="app7">Multimedia Appendix 7</xref>). Zhang et al reported an interesting finding [<xref ref-type="bibr" rid="ref29">29</xref>]. When they input randomly labeled objects or random pixels, after 10,000 steps, their neural network models still converged to fit the training set perfectly. The neural networks were rich enough to memorize bad training data. Yet, their results on testing data sets were poor. To avoid the problem of ambiguous ground truth, we modified the method so that only the burn wound margin was co-labeled by the 2 burn surgeons. This was because the ground truth of the margins had the highest consensus and because all formulae used for burn resuscitation only involved total burn area, which is equivalent to burn margin and is not related to burn depth.</p>
      </sec>
      <sec>
        <title>Segmentation Results</title>
        <p>We chose U-Net and Mask R-CNN as our main models for segmentation of burn wounds and hands because they are both popular and well-developed CNN models. Although they have different architectures and use different loss functions, their segmentation output seems similar. U-Net outputs semantic segmentation, and it is the most common segmentation model in the medical field [<xref ref-type="bibr" rid="ref30">30</xref>]. U-Net has been deployed in the evaluation of various sources of medical images, such as positron emission tomography (PET) scans of brain lesions [<xref ref-type="bibr" rid="ref31">31</xref>], microscopy images of cells [<xref ref-type="bibr" rid="ref32">32</xref>], CT scans of thoracic organs [<xref ref-type="bibr" rid="ref33">33</xref>], and MRI scans of breast lesions [<xref ref-type="bibr" rid="ref34">34</xref>]. Mask R-CNN was developed by Facebook AI Research, and it outputs object detection with instance segmentation [<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]. Mask R-CNN began getting attention in the medical field in 2018. It has been deployed in the analysis of various sources of medical images as well, such as PET scans of lung lesions [<xref ref-type="bibr" rid="ref37">37</xref>], sonographic images of breast lesions [<xref ref-type="bibr" rid="ref38">38</xref>], and MRI scans of knee injuries [<xref ref-type="bibr" rid="ref39">39</xref>].</p>
        <p>Previous studies have also applied these 2 models. Vuola et al reported a study of nuclei segmentation of microscopy images. U-Net had a better DC and created more accurate segmentation masks. Mask R-CNN had better recall and precision, and could detect nuclei more accurately but struggled to predict a good segmentation mask [<xref ref-type="bibr" rid="ref40">40</xref>]. Zhao et al reported a study of tree canopy segmentation of aerial images. Mask R-CNN performed better in segmentation as well as in tree detection [<xref ref-type="bibr" rid="ref41">41</xref>]. Bouget et al reported a study of thoracic structure segmentation combining 2 models. Mask R-CNN had the weakness of underestimating structural boundaries, and it required a longer training time. U-Net had the weakness of spatial inconsistency when compiling 2D segmentation results into 3D [<xref ref-type="bibr" rid="ref42">42</xref>]. In our study, Mask R-CNN was better at burn wound segmentation, while U-Net was better at hand segmentation. We believe that when the segmented objects have similar shape and size, such as with nuclei, hands, and palms, U-Net can achieve better segmentation results than Mask R-CNN. Mask R-CNN had to take into account the loss function components from estimating the bounding box and class, not just the mask. The weights of the bounding box and class components are calculated prior to the weight of the mask component in order to get accurate instance location. Huang et al proposed a modified Mask R-CNN to improve mask prediction [<xref ref-type="bibr" rid="ref43">43</xref>].</p>
        <p>However, the performance of U-Net in burn wound segmentation was not as good as that of Mask R-CNN. The burn wounds comprised 3 types of burn depths with various colors, hues, and textures, and were also of irregular shape and different sizes. Because it lacks the RPN function of Mask R-CNN, U-Net may not have the volume to “memorize” all the features of burn wounds by convolution and de-convolution. In the Kaggle science bowl, both U-Net and Mask R-CNN achieved excellent results after fine tuning. Hence, the performance of the 2 models may depend on the segmentation task, the data sets, and fine tuning.</p>
        <p>The segmentation result is not the only consideration. There are other comparative pros and cons of these 2 models. If a model is deployed in mobile devices, time consumption for prediction is an important factor. In our study, it took less time for U-Net (0.035 s/image) to do the prediction than for Mask R-CNN (0.175 s/image). The total time needed to train Mask R-CNN was about 1.5 times that needed to train U-Net. In addition, semantic segmentation involves direct pixel classification. If the objective is to calculate the total burn area, U-Net is capable of producing good results. If we want to segment different types of wounds on the same images, such as incisions and abrasions, Mask R-CNN can provide classification confidence in each of the RoIs, not just the masks.</p>
        <p>Both U-Net and Mask R-CNN can segment burn wounds of any burn depths (<xref rid="figure2" ref-type="fig">Figures 2</xref>-<xref rid="figure4" ref-type="fig">4</xref>). The segmentation result was more satisfactory when areas were large and confluent (<xref rid="figure4" ref-type="fig">Figure 4</xref>). If the burn wound (pixels) was small, the segmentation results of both models were not satisfactory (<xref rid="figure5" ref-type="fig">Figure 5</xref>). This is because a small area is susceptible to resizing, convolution, and max pooling. Similar observations were reported by Bouget et al, when they segmented structures inside the chest wall [<xref ref-type="bibr" rid="ref42">42</xref>]. Large structures, such as the heart, lungs, and spine, had a DC of more than 0.95. Small structures, such as lymph nodes, had a DC of only around 0.41. In the study by Vuola et al, they removed the very small masks (under 10 pixels) to improve the prediction [<xref ref-type="bibr" rid="ref40">40</xref>]. Fortunately, small and scattered burns are less critical clinically.</p>
      </sec>
      <sec>
        <title>Conversion of Segmentation Mask to %TBSA</title>
        <p>There exist other methods for converting a segmentation mask to %TBSA. One approach is to acquire the actual burn area (eg, 225 cm<sup>2</sup>) by calculating the relation of pixels of the mask area on the image and the distance from the wound to the camera. The next step is to calculate the body surface area (BSA; eg, 17,525 cm<sup>2</sup>) via the patient’s body weight, height, and gender. The %TBSA of the burn wound can be calculated by dividing these 2 numbers. Although this approach seems straightforward, there are more than 25 formulae to estimate BSA based on studies of different populations [<xref ref-type="bibr" rid="ref44">44</xref>]. When it comes to child BSA, we need completely different formulae for calculation, again with various degrees of accuracy [<xref ref-type="bibr" rid="ref45">45</xref>].</p>
        <p>We adopted the rule of hand/palm as a guide to estimate %TBSA, because the rule of hand/palm shows very little difference between racial groups, genders, BMI, and ages [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref46">46</xref>]. The rule of hand/palm can also be used in children and infants, where it is closer to the original 1% TBSA rule. Moreover, thumbprints, which are approximately 1/30 TBSA, can also be used as a guide to estimate areas of small burns [<xref ref-type="bibr" rid="ref47">47</xref>]. In our study, only 17 images were burn injuries involving the volar hand. We therefore collected images of healthy hands from our colleagues rather than using burned hands to train the models.</p>
        <p>In the last stage of our study, we conducted a test to compare the %TBSA estimated by burn surgeons and by Mask R-CNN with a ResNet101 backbone. Mask R-CNN had less variance from ground truth on average. It is very important to have a small deviation on every estimation. If a patient has multiple burn sites, the errors from each wound may add up to become a large deviation. In a study by Parvizi et al, the difference in estimation by inspection across burn experts was found to be as large as 16.5% TBSA in an adult patient and 31.5% TBSA in a child patient, which resulted in great volume differences in the estimation of fluid needed for resuscitation [<xref ref-type="bibr" rid="ref10">10</xref>]. Our method was aimed to derive similar estimates when the same burn wound was estimated by different burn experts by inspection, such as by teleconsultation. In reality, burn surgeons would typically visit patients and calculate the area more meticulously. Additionally, the burn area would be recalculated in the days following the burn injury. Theoretically, the variability among estimations would be less than when the burn area is estimated just by inspecting an image of the burn wound.</p>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>The data set of burn wounds was collected from a single medical center in Taiwan. Although it is currently the largest data set, the number of training images was small. The models require more input images to improve accuracy.</p>
        <p>Our deep learning models can segment a burn wound of any burn depth. However, they are unable to classify burn depths on segmentation. This is because it is difficult for burn surgeons to consistently define the ground truth of burn depths. Further study may apply machine learning to assist in burn depth labeling before input for training.</p>
        <p>We used normal hands as a template to calculate the %TBSA burned. When a patient had burns involving both hands, our models could still segment the burned hands. Since children’s hands are shaped similarly to those of adults, our models can presumably also segment the hands of children (<xref ref-type="supplementary-material" rid="app8">Multimedia Appendix 8</xref>). However, we did not collect enough images to directly assess accuracy in these circumstances.</p>
        <p>Our data set did not include burn wounds from patients with markedly different skin tones. We hypothesize that the deep learning models will accurately detect burn wounds when the burn injury is more severe than superficial second degree, where the skin layers that are deeper than the pigment cells are disrupted. For example, a superficial second-degree burn injury with ruptured bullae shows a similar shade of pink even on different skin tones. Yet, skin tone will definitely contribute to the performance of the models. Convolutional layers and the RoIs obtained by deep learning largely depend on the relationships among adjacent pixels. To test our hypothesis, we collected 100 web-scraped images of burn wounds from different skin tones and input them into our models for wound segmentation (<xref ref-type="supplementary-material" rid="app9">Multimedia Appendix 9</xref>). The results confirmed that our models performed well when the burn injury was more severe than superficial second degree. However, the segmentation results varied when the burn wound had no bullae formation or rupture (whether superficial second or first degree). To resolve this problem, we need more quality images to correlate skin tone with segmentation performance.</p>
        <p>Finally, burn wound images are 2D projections of 3D burn wounds, akin to the Mercator world map. Unlike the world map, the cross sections of the trunk and extremities of the human body are not just ellipses or circles. The distance of the camera from the wound bed can be adjusted for by a simple formula, but adjusting for the angle at which the photos are taken requires complex differential and integral formulae with multiple variables. To get the most accurate estimation of %TBSA, we suggest taking all photos at a constant distance of around 30 to 50 cm and holding the camera (cellphone) parallel to the wound bed to decrease the effect of the angle. Our study will further deploy models on images taken with a 3D camera to acquire more accurate results.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>To the best of our knowledge, this is the first study to determine the %TBSA of burn wounds with different deep learning models. Based on the rule of hand, %TBSA can be calculated by comparing segmentation masks of the burn wound and hand of a patient. In our study, Mask R-CNN with ResNet101 performed this task satisfactorily in comparison with burn surgeons. With the assistance of deep learning, the fluid resuscitation and nutritional needs of burn injury patients can be more precisely and accurately assessed.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>U-Net architecture, encoder, and decoder replaced with ResNet.</p>
        <media xlink:href="medinform_v9i12e22798_app1.png" xlink:title="PNG File , 107 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Segmentation of the palm. A: original photo; B: ground truth; C: result of Mask R-CNN; D: result of U-Net.</p>
        <media xlink:href="medinform_v9i12e22798_app2.png" xlink:title="PNG File , 1665 KB"/>
      </supplementary-material>
      <supplementary-material id="app3">
        <label>Multimedia Appendix 3</label>
        <p>Estimation of %TBSA burned according to 5 different burn surgeons (ground truth and Mask R-CNN). %TBSA: percentage total body surface area.</p>
        <media xlink:href="medinform_v9i12e22798_app3.png" xlink:title="PNG File , 150 KB"/>
      </supplementary-material>
      <supplementary-material id="app4">
        <label>Multimedia Appendix 4</label>
        <p>Mean accuracy of burn depth classification.</p>
        <media xlink:href="medinform_v9i12e22798_app4.png" xlink:title="PNG File , 114 KB"/>
      </supplementary-material>
      <supplementary-material id="app5">
        <label>Multimedia Appendix 5</label>
        <p>Confusion matrix of different subgroups.</p>
        <media xlink:href="medinform_v9i12e22798_app5.png" xlink:title="PNG File , 99 KB"/>
      </supplementary-material>
      <supplementary-material id="app6">
        <label>Multimedia Appendix 6</label>
        <p>A mix of all burn depths in a burn wound.</p>
        <media xlink:href="medinform_v9i12e22798_app6.png" xlink:title="PNG File , 1407 KB"/>
      </supplementary-material>
      <supplementary-material id="app7">
        <label>Multimedia Appendix 7</label>
        <p>Incorrect prediction of burn depths but correct prediction of total burn area.</p>
        <media xlink:href="medinform_v9i12e22798_app7.png" xlink:title="PNG File , 260 KB"/>
      </supplementary-material>
      <supplementary-material id="app8">
        <label>Multimedia Appendix 8</label>
        <p>A: adult hand burn; B: segmentation of adult hand burn; C: child hand burn; D: segmentation of child hand burn.</p>
        <media xlink:href="medinform_v9i12e22798_app8.png" xlink:title="PNG File , 1732 KB"/>
      </supplementary-material>
      <supplementary-material id="app9">
        <label>Multimedia Appendix 9</label>
        <p>Web-scraped images of burn wounds on patients with markedly lighter and darker skin tone in comparison to our study population.</p>
        <media xlink:href="medinform_v9i12e22798_app9.png" xlink:title="PNG File , 1338 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">%TBSA</term>
          <def>
            <p>percentage total body surface area</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">BSA</term>
          <def>
            <p>body surface area</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">CNN</term>
          <def>
            <p>convolutional neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">CT</term>
          <def>
            <p>computed tomography</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">DC</term>
          <def>
            <p>Dice coefficient</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">IoU</term>
          <def>
            <p>intersection over union</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">LDI</term>
          <def>
            <p>laser Doppler imaging</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">MRI</term>
          <def>
            <p>magnetic resonance imaging</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">PET</term>
          <def>
            <p>positron emission tomography</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">RoI</term>
          <def>
            <p>region of interest</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">RPN</term>
          <def>
            <p>region proposal network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">SVM</term>
          <def>
            <p>support vector machine</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This work was supported by the Innovation Project of Far Eastern Memorial Hospital (grant number PI20200002). We thank our colleagues in the Department of Surgery and the Department of Nursing (operating room, 13G ward) of Far Eastern Memorial Hospital and in the Graduate Institute of Biomedical Electronics &#38; Bioinformatics of National Taiwan University for the collection of the images of hands. We also thank Shih-Chen Huang, who helped coordinate with the burn surgeons in the collection of the label data.</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Harish</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Raymond</surname>
              <given-names>AP</given-names>
            </name>
            <name name-style="western">
              <surname>Issler</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Lajevardi</surname>
              <given-names>SS</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Maitz</surname>
              <given-names>PK</given-names>
            </name>
            <name name-style="western">
              <surname>Kennedy</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Accuracy of burn size estimation in patients transferred to adult Burn Units in Sydney, Australia: an audit of 698 patients</article-title>
          <source>Burns</source>
          <year>2015</year>
          <month>02</month>
          <volume>41</volume>
          <issue>1</issue>
          <fpage>91</fpage>
          <lpage>9</lpage>
          <pub-id pub-id-type="doi">10.1016/j.burns.2014.05.005</pub-id>
          <pub-id pub-id-type="medline">24972983</pub-id>
          <pub-id pub-id-type="pii">S0305-4179(14)00170-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Baartmans</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>van Baar</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Boxma</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Dokter</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Tibboel</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Nieuwenhuis</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Accuracy of burn size assessment prior to arrival in Dutch burn centres and its consequences in children: a nationwide evaluation</article-title>
          <source>Injury</source>
          <year>2012</year>
          <month>09</month>
          <volume>43</volume>
          <issue>9</issue>
          <fpage>1451</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1016/j.injury.2011.06.027</pub-id>
          <pub-id pub-id-type="medline">21741042</pub-id>
          <pub-id pub-id-type="pii">S0020-1383(11)00264-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Resch</surname>
              <given-names>TR</given-names>
            </name>
            <name name-style="western">
              <surname>Drake</surname>
              <given-names>RM</given-names>
            </name>
            <name name-style="western">
              <surname>Helmer</surname>
              <given-names>SD</given-names>
            </name>
            <name name-style="western">
              <surname>Jost</surname>
              <given-names>GD</given-names>
            </name>
            <name name-style="western">
              <surname>Osland</surname>
              <given-names>JS</given-names>
            </name>
          </person-group>
          <article-title>Estimation of burn depth at burn centers in the United States</article-title>
          <source>Journal of Burn Care &#38; Research</source>
          <year>2014</year>
          <volume>35</volume>
          <issue>6</issue>
          <fpage>491</fpage>
          <lpage>497</lpage>
          <pub-id pub-id-type="doi">10.1097/bcr.0000000000000031</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jaskille</surname>
              <given-names>AD</given-names>
            </name>
            <name name-style="western">
              <surname>Shupp</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Jordan</surname>
              <given-names>MH</given-names>
            </name>
            <name name-style="western">
              <surname>Jeng</surname>
              <given-names>JC</given-names>
            </name>
          </person-group>
          <article-title>Critical review of burn depth assessment techniques: Part I. Historical review</article-title>
          <source>Journal of Burn Care &#38; Research</source>
          <year>2009</year>
          <volume>30</volume>
          <issue>6</issue>
          <fpage>937</fpage>
          <lpage>947</lpage>
          <pub-id pub-id-type="doi">10.1097/bcr.0b013e3181c07f21</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Monstrey</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hoeksema</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Verbelen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Pirayesh</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Blondeel</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Assessment of burn depth and burn wound healing potential</article-title>
          <source>Burns</source>
          <year>2008</year>
          <month>09</month>
          <volume>34</volume>
          <issue>6</issue>
          <fpage>761</fpage>
          <lpage>9</lpage>
          <pub-id pub-id-type="doi">10.1016/j.burns.2008.01.009</pub-id>
          <pub-id pub-id-type="medline">18511202</pub-id>
          <pub-id pub-id-type="pii">S0305-4179(08)00041-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jaspers</surname>
              <given-names>ME</given-names>
            </name>
            <name name-style="western">
              <surname>van Haasterecht</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>van Zuijlen</surname>
              <given-names>PP</given-names>
            </name>
            <name name-style="western">
              <surname>Mokkink</surname>
              <given-names>LB</given-names>
            </name>
          </person-group>
          <article-title>A systematic review on the quality of measurement techniques for the assessment of burn wound depth or healing potential</article-title>
          <source>Burns</source>
          <year>2019</year>
          <month>03</month>
          <volume>45</volume>
          <issue>2</issue>
          <fpage>261</fpage>
          <lpage>281</lpage>
          <pub-id pub-id-type="doi">10.1016/j.burns.2018.05.015</pub-id>
          <pub-id pub-id-type="medline">29941159</pub-id>
          <pub-id pub-id-type="pii">S0305-4179(18)30401-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thatcher</surname>
              <given-names>JE</given-names>
            </name>
            <name name-style="western">
              <surname>Squiers</surname>
              <given-names>JJ</given-names>
            </name>
            <name name-style="western">
              <surname>Kanick</surname>
              <given-names>SC</given-names>
            </name>
            <name name-style="western">
              <surname>King</surname>
              <given-names>DR</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Mohan</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Sellke</surname>
              <given-names>EW</given-names>
            </name>
            <name name-style="western">
              <surname>DiMaio</surname>
              <given-names>JM</given-names>
            </name>
          </person-group>
          <article-title>Imaging techniques for clinical burn assessment with a focus on multispectral imaging</article-title>
          <source>Adv Wound Care (New Rochelle)</source>
          <year>2016</year>
          <month>08</month>
          <day>01</day>
          <volume>5</volume>
          <issue>8</issue>
          <fpage>360</fpage>
          <lpage>378</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/27602255"/>
          </comment>
          <pub-id pub-id-type="doi">10.1089/wound.2015.0684</pub-id>
          <pub-id pub-id-type="medline">27602255</pub-id>
          <pub-id pub-id-type="pii">10.1089/wound.2015.0684</pub-id>
          <pub-id pub-id-type="pmcid">PMC4991589</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thom</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Appraising current methods for preclinical calculation of burn size - A pre-hospital perspective</article-title>
          <source>Burns</source>
          <year>2017</year>
          <month>02</month>
          <volume>43</volume>
          <issue>1</issue>
          <fpage>127</fpage>
          <lpage>136</lpage>
          <pub-id pub-id-type="doi">10.1016/j.burns.2016.07.003</pub-id>
          <pub-id pub-id-type="medline">27575669</pub-id>
          <pub-id pub-id-type="pii">S0305-4179(16)30208-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Neaman</surname>
              <given-names>KC</given-names>
            </name>
            <name name-style="western">
              <surname>Andres</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>McClure</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Burton</surname>
              <given-names>ME</given-names>
            </name>
            <name name-style="western">
              <surname>Kemmeter</surname>
              <given-names>PR</given-names>
            </name>
            <name name-style="western">
              <surname>Ford</surname>
              <given-names>RD</given-names>
            </name>
          </person-group>
          <article-title>A new method for estimation of involved BSAs for obese and normal-weight patients with burn injury</article-title>
          <source>Journal of Burn Care &#38; Research</source>
          <year>2011</year>
          <volume>32</volume>
          <issue>3</issue>
          <fpage>421</fpage>
          <lpage>428</lpage>
          <pub-id pub-id-type="doi">10.1097/bcr.0b013e318217f8c6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Parvizi</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Kamolz</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Giretzlehner</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Haller</surname>
              <given-names>HL</given-names>
            </name>
            <name name-style="western">
              <surname>Trop</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Selig</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Nagele</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Lumenta</surname>
              <given-names>DB</given-names>
            </name>
          </person-group>
          <article-title>The potential impact of wrong TBSA estimations on fluid resuscitation in patients suffering from burns: things to keep in mind</article-title>
          <source>Burns</source>
          <year>2014</year>
          <month>03</month>
          <volume>40</volume>
          <issue>2</issue>
          <fpage>241</fpage>
          <lpage>5</lpage>
          <pub-id pub-id-type="doi">10.1016/j.burns.2013.06.019</pub-id>
          <pub-id pub-id-type="medline">24050977</pub-id>
          <pub-id pub-id-type="pii">S0305-4179(13)00202-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kwon</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hong</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Choi</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Baik</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Jeong</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Koo</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Oh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yi</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Detection of atrial fibrillation using a ring-type wearable device (CardioTracker) and deep learning analysis of photoplethysmography signals: prospective observational proof-of-concept study</article-title>
          <source>J Med Internet Res</source>
          <year>2020</year>
          <month>05</month>
          <day>21</day>
          <volume>22</volume>
          <issue>5</issue>
          <fpage>e16443</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2020/5/e16443/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/16443</pub-id>
          <pub-id pub-id-type="medline">32348254</pub-id>
          <pub-id pub-id-type="pii">v22i5e16443</pub-id>
          <pub-id pub-id-type="pmcid">PMC7273241</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Adam</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Rampášek</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Safikhani</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Smirnov</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Haibe-Kains</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Goldenberg</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Machine learning approaches to drug response prediction: challenges and recent progress</article-title>
          <source>NPJ Precis Oncol</source>
          <year>2020</year>
          <month>6</month>
          <day>15</day>
          <volume>4</volume>
          <issue>1</issue>
          <fpage>19</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41698-020-0122-1"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41698-020-0122-1</pub-id>
          <pub-id pub-id-type="medline">32566759</pub-id>
          <pub-id pub-id-type="pii">122</pub-id>
          <pub-id pub-id-type="pmcid">PMC7296033</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kanavati</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Toyokawa</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Momosaki</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rambeau</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kozuma</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Shoji</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Yamazaki</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Takeo</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Iizuka</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Tsuneki</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Weakly-supervised learning for lung carcinoma classification using deep learning</article-title>
          <source>Sci Rep</source>
          <year>2020</year>
          <month>06</month>
          <day>09</day>
          <volume>10</volume>
          <issue>1</issue>
          <fpage>9297</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-020-66333-x"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-020-66333-x</pub-id>
          <pub-id pub-id-type="medline">32518413</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-020-66333-x</pub-id>
          <pub-id pub-id-type="pmcid">PMC7283481</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Han</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Johnson</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Gaed</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gómez</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Moussa</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Chin</surname>
              <given-names>JL</given-names>
            </name>
            <name name-style="western">
              <surname>Pautler</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bauman</surname>
              <given-names>GS</given-names>
            </name>
            <name name-style="western">
              <surname>Ward</surname>
              <given-names>AD</given-names>
            </name>
          </person-group>
          <article-title>Histologic tissue components provide major cues for machine learning-based prostate cancer detection and grading on prostatectomy specimens</article-title>
          <source>Sci Rep</source>
          <year>2020</year>
          <month>06</month>
          <day>18</day>
          <volume>10</volume>
          <issue>1</issue>
          <fpage>9911</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-020-66849-2"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-020-66849-2</pub-id>
          <pub-id pub-id-type="medline">32555410</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-020-66849-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC7303108</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kanevsky</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Corban</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Gaster</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Kanevsky</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gilardino</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Big data and machine learning in plastic surgery</article-title>
          <source>Plast Reconstr Surg</source>
          <year>2016</year>
          <volume>137</volume>
          <issue>5</issue>
          <fpage>890e</fpage>
          <lpage>897e</lpage>
          <pub-id pub-id-type="doi">10.1097/prs.0000000000002088</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>NT</given-names>
            </name>
            <name name-style="western">
              <surname>Salinas</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Machine learning in burn care and research: A systematic review of the literature</article-title>
          <source>Burns</source>
          <year>2015</year>
          <month>12</month>
          <volume>41</volume>
          <issue>8</issue>
          <fpage>1636</fpage>
          <lpage>1641</lpage>
          <pub-id pub-id-type="doi">10.1016/j.burns.2015.07.001</pub-id>
          <pub-id pub-id-type="medline">26233900</pub-id>
          <pub-id pub-id-type="pii">S0305-4179(15)00200-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Serrano</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Acha</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Gómez-Cía</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Acha</surname>
              <given-names>JI</given-names>
            </name>
            <name name-style="western">
              <surname>Roa</surname>
              <given-names>LM</given-names>
            </name>
          </person-group>
          <article-title>A computer assisted diagnosis tool for the classification of burns by depth of injury</article-title>
          <source>Burns</source>
          <year>2005</year>
          <month>05</month>
          <volume>31</volume>
          <issue>3</issue>
          <fpage>275</fpage>
          <lpage>81</lpage>
          <pub-id pub-id-type="doi">10.1016/j.burns.2004.11.019</pub-id>
          <pub-id pub-id-type="medline">15774281</pub-id>
          <pub-id pub-id-type="pii">S0305-4179(04)00345-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Acha</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Serrano</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Acha</surname>
              <given-names>JI</given-names>
            </name>
            <name name-style="western">
              <surname>Roa</surname>
              <given-names>LM</given-names>
            </name>
          </person-group>
          <article-title>Segmentation and classification of burn images by color and texture information</article-title>
          <source>J Biomed Opt</source>
          <year>2005</year>
          <volume>10</volume>
          <issue>3</issue>
          <fpage>034014</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1117/1.1921227"/>
          </comment>
          <pub-id pub-id-type="doi">10.1117/1.1921227</pub-id>
          <pub-id pub-id-type="medline">16229658</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Acha</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Sonka</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Serrano</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Palencia</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Murillo</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Classification of burn wounds using support vector machines</article-title>
          <year>2004</year>
          <conf-name>Medical Imaging 2004</conf-name>
          <conf-date>May 12, 2004</conf-date>
          <conf-loc>San Diego, CA</conf-loc>
          <pub-id pub-id-type="doi">10.1117/12.535491</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Acha</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Serrano</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Fondon</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Gomez-Cia</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Burn depth analysis using multidimensional scaling applied to psychophysical experiment data</article-title>
          <source>IEEE Trans Med Imaging</source>
          <year>2013</year>
          <month>06</month>
          <volume>32</volume>
          <issue>6</issue>
          <fpage>1111</fpage>
          <lpage>1120</lpage>
          <pub-id pub-id-type="doi">10.1109/tmi.2013.2254719</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Serrano</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Boloix-Tortosa</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Gómez-Cía</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Acha</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Features identification for automatic burn classification</article-title>
          <source>Burns</source>
          <year>2015</year>
          <month>12</month>
          <volume>41</volume>
          <issue>8</issue>
          <fpage>1883</fpage>
          <lpage>1890</lpage>
          <pub-id pub-id-type="doi">10.1016/j.burns.2015.05.011</pub-id>
          <pub-id pub-id-type="medline">26188898</pub-id>
          <pub-id pub-id-type="pii">S0305-4179(15)00140-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cirillo</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mirdell</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Sjöberg</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Pham</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Time-independent prediction of burn depth using deep convolutional neural networks</article-title>
          <source>J Burn Care Res</source>
          <year>2019</year>
          <month>10</month>
          <day>16</day>
          <volume>40</volume>
          <issue>6</issue>
          <fpage>857</fpage>
          <lpage>863</lpage>
          <pub-id pub-id-type="doi">10.1093/jbcr/irz103</pub-id>
          <pub-id pub-id-type="medline">31187119</pub-id>
          <pub-id pub-id-type="pii">5514072</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Despo</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Yeung</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Jopling</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Pridgen</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Sheckter</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Silberstein</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Fei-Fei</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Milstein</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <source>BURNED: Towards Efficient and Accurate Burn Prognosis Using Deep Learning</source>
          <year>2017</year>
          <access-date>2020-08-17</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://cs231n.stanford.edu/reports/2017/pdfs/507.pdf">http://cs231n.stanford.edu/reports/2017/pdfs/507.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jiao</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Su</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Xie</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Ye</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>Burn image segmentation based on Mask Regions with convolutional neural network deep learning framework: more accurate and more convenient</article-title>
          <source>Burns Trauma</source>
          <year>2019</year>
          <volume>7</volume>
          <fpage>6</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://burnstrauma.biomedcentral.com/articles/10.1186/s41038-018-0137-9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s41038-018-0137-9</pub-id>
          <pub-id pub-id-type="medline">30859107</pub-id>
          <pub-id pub-id-type="pii">137</pub-id>
          <pub-id pub-id-type="pmcid">PMC6394103</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Giretzlehner</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Dirnberger</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Owen</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Haller</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Lumenta</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Kamolz</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>The determination of total burn surface area: How much difference?</article-title>
          <source>Burns</source>
          <year>2013</year>
          <month>09</month>
          <volume>39</volume>
          <issue>6</issue>
          <fpage>1107</fpage>
          <lpage>13</lpage>
          <pub-id pub-id-type="doi">10.1016/j.burns.2013.01.021</pub-id>
          <pub-id pub-id-type="medline">23566430</pub-id>
          <pub-id pub-id-type="pii">S0305-4179(13)00045-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Parvizi</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Giretzlehner</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wurzer</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Klein</surname>
              <given-names>LD</given-names>
            </name>
            <name name-style="western">
              <surname>Shoham</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Bohanon</surname>
              <given-names>FJ</given-names>
            </name>
            <name name-style="western">
              <surname>Haller</surname>
              <given-names>HL</given-names>
            </name>
            <name name-style="western">
              <surname>Tuca</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Branski</surname>
              <given-names>LK</given-names>
            </name>
            <name name-style="western">
              <surname>Lumenta</surname>
              <given-names>DB</given-names>
            </name>
            <name name-style="western">
              <surname>Herndon</surname>
              <given-names>DN</given-names>
            </name>
            <name name-style="western">
              <surname>Kamolz</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>BurnCase 3D software validation study: Burn size measurement accuracy and inter-rater reliability</article-title>
          <source>Burns</source>
          <year>2016</year>
          <month>03</month>
          <volume>42</volume>
          <issue>2</issue>
          <fpage>329</fpage>
          <lpage>35</lpage>
          <pub-id pub-id-type="doi">10.1016/j.burns.2016.01.008</pub-id>
          <pub-id pub-id-type="medline">26839051</pub-id>
          <pub-id pub-id-type="pii">S0305-4179(16)00011-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thapa</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Chamikara</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Camtepe</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Rehman</surname>
              <given-names>MH</given-names>
            </name>
            <name name-style="western">
              <surname>Gaber</surname>
              <given-names>MM</given-names>
            </name>
          </person-group>
          <article-title>Advancements of federated learning towards privacy preservation: from federated learning to split learning</article-title>
          <source>Federated Learning Systems. Studies in Computational Intelligence, vol 965</source>
          <year>2021</year>
          <publisher-loc>Cham</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>79</fpage>
          <lpage>109</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Myronenko</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Roth</surname>
              <given-names>HR</given-names>
            </name>
            <name name-style="western">
              <surname>Harmon</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Turkbey</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Turkbey</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Carrafiello</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Patella</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Cariati</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Obinata</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Mori</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Tamura</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>An</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Wood</surname>
              <given-names>BJ</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Federated semi-supervised learning for COVID region segmentation in chest CT using multi-national data from China, Italy, Japan</article-title>
          <source>Med Image Anal</source>
          <year>2021</year>
          <month>05</month>
          <volume>70</volume>
          <fpage>101992</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/33601166"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.media.2021.101992</pub-id>
          <pub-id pub-id-type="medline">33601166</pub-id>
          <pub-id pub-id-type="pii">S1361-8415(21)00038-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC7864789</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Bengio</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hardt</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Recht</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Vinyals</surname>
              <given-names>O</given-names>
            </name>
          </person-group>
          <article-title>Understanding deep learning (still) requires rethinking generalization</article-title>
          <source>Commun. ACM</source>
          <year>2021</year>
          <month>03</month>
          <volume>64</volume>
          <issue>3</issue>
          <fpage>107</fpage>
          <lpage>115</lpage>
          <pub-id pub-id-type="doi">10.1145/3446776</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ronneberger</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Fischer</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Brox</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Navab</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Hornegger</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wells</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Frangi</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>U-Net: Convolutional Networks for Biomedical Image Segmentation</article-title>
          <source>Medical Image Computing and Computer-Assisted Intervention – MICCAI 2015. MICCAI 2015. Lecture Notes in Computer Science, vol 9351</source>
          <year>2015</year>
          <publisher-loc>Cham</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>234</fpage>
          <lpage>241</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Blanc-Durand</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Van Der Gucht</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Schaefer</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Itti</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Prior</surname>
              <given-names>JO</given-names>
            </name>
          </person-group>
          <article-title>Automatic lesion detection and segmentation of 18F-FET PET in gliomas: A full 3D U-Net convolutional neural network study</article-title>
          <source>PLoS One</source>
          <year>2018</year>
          <month>04</month>
          <day>13</day>
          <volume>13</volume>
          <issue>4</issue>
          <fpage>e0195798</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0195798"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0195798</pub-id>
          <pub-id pub-id-type="medline">29652908</pub-id>
          <pub-id pub-id-type="pii">PONE-D-18-00368</pub-id>
          <pub-id pub-id-type="pmcid">PMC5898737</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fabijańska</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Segmentation of corneal endothelium images using a U-Net-based convolutional neural network</article-title>
          <source>Artif Intell Med</source>
          <year>2018</year>
          <month>06</month>
          <volume>88</volume>
          <fpage>1</fpage>
          <lpage>13</lpage>
          <pub-id pub-id-type="doi">10.1016/j.artmed.2018.04.004</pub-id>
          <pub-id pub-id-type="medline">29680687</pub-id>
          <pub-id pub-id-type="pii">S0933-3657(18)30057-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Lei</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Thomas</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Curran</surname>
              <given-names>WJ</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Automatic multiorgan segmentation in thorax CT images using U-net-GAN</article-title>
          <source>Med Phys</source>
          <year>2019</year>
          <month>05</month>
          <day>22</day>
          <volume>46</volume>
          <issue>5</issue>
          <fpage>2157</fpage>
          <lpage>2168</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/30810231"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/mp.13458</pub-id>
          <pub-id pub-id-type="medline">30810231</pub-id>
          <pub-id pub-id-type="pmcid">PMC6510589</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>VY</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Chan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Chow</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Luk</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kwong</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Su</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Automatic breast and fibroglandular tissue segmentation in breast MRI using deep learning by a fully-Convolutional Residual Neural Network U-Net</article-title>
          <source>Acad Radiol</source>
          <year>2019</year>
          <month>11</month>
          <volume>26</volume>
          <issue>11</issue>
          <fpage>1526</fpage>
          <lpage>1535</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/30713130"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.acra.2019.01.012</pub-id>
          <pub-id pub-id-type="medline">30713130</pub-id>
          <pub-id pub-id-type="pii">S1076-6332(19)30036-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC6669125</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Girshick</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Faster R-CNN: towards real-time object detection with region proposal networks</article-title>
          <source>IEEE Trans Pattern Anal Mach Intell</source>
          <year>2017</year>
          <month>06</month>
          <volume>39</volume>
          <issue>6</issue>
          <fpage>1137</fpage>
          <lpage>1149</lpage>
          <pub-id pub-id-type="doi">10.1109/TPAMI.2016.2577031</pub-id>
          <pub-id pub-id-type="medline">27295650</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>He</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Gkioxari</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Dollár</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Girshick</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Mask R-CNN</article-title>
          <year>2017</year>
          <conf-name>2017 IEEE International Conference on Computer Vision (ICCV)</conf-name>
          <conf-date>October 22-29, 2017</conf-date>
          <conf-loc>Venice, Italy</conf-loc>
          <fpage>2980</fpage>
          <lpage>2988</lpage>
          <pub-id pub-id-type="doi">10.1109/iccv.2017.322</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Multiscale Mask R-CNN-Based lung tumor detection using PET imaging</article-title>
          <source>Mol Imaging</source>
          <year>2019</year>
          <month>07</month>
          <day>31</day>
          <volume>18</volume>
          <fpage>1536012119863531</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1177/1536012119863531?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%3dpubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/1536012119863531</pub-id>
          <pub-id pub-id-type="medline">31364467</pub-id>
          <pub-id pub-id-type="pmcid">PMC6669841</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chiao</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Liao</surname>
              <given-names>KY</given-names>
            </name>
            <name name-style="western">
              <surname>Hsieh</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Detection and classification the breast tumors using mask R-CNN on sonograms</article-title>
          <source>Medicine</source>
          <year>2019</year>
          <volume>98</volume>
          <issue>19</issue>
          <fpage>e15200</fpage>
          <pub-id pub-id-type="doi">10.1097/md.0000000000015200</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Couteaux</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Si-Mohamed</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Nempont</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Lefevre</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Popoff</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Pizaine</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Villain</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Bloch</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Cotten</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Boussel</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Automatic knee meniscus tear detection and orientation classification with Mask-RCNN</article-title>
          <source>Diagn Interv Imaging</source>
          <year>2019</year>
          <month>04</month>
          <volume>100</volume>
          <issue>4</issue>
          <fpage>235</fpage>
          <lpage>242</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2211-5684(19)30058-0"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.diii.2019.03.002</pub-id>
          <pub-id pub-id-type="medline">30910620</pub-id>
          <pub-id pub-id-type="pii">S2211-5684(19)30058-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vuola</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Akram</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kannala</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Mask-RCNN and U-Net Ensembled for Nuclei Segmentation</article-title>
          <year>2019</year>
          <conf-name>2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019)</conf-name>
          <conf-date>April 8-11, 2019</conf-date>
          <conf-loc>Venice, Italy</conf-loc>
          <fpage>208</fpage>
          <lpage>212</lpage>
          <pub-id pub-id-type="doi">10.1109/isbi.2019.8759574</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Niu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Comparing U-Net convolutional network with mask R-CNN in the performances of pomegranate tree canopy segmentation</article-title>
          <year>2018</year>
          <conf-name>Proc. SPIE 10780, Multispectral, Hyperspectral, and Ultraspectral Remote Sensing Technology, Techniques and Applications VII</conf-name>
          <conf-date>December 21, 2018</conf-date>
          <conf-loc>Honolulu, HI</conf-loc>
          <pub-id pub-id-type="doi">10.1117/12.2325570</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bouget</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Jørgensen</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kiss</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Leira</surname>
              <given-names>HO</given-names>
            </name>
            <name name-style="western">
              <surname>Langø</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Semantic segmentation and detection of mediastinal lymph nodes and anatomical structures in CT data for lung cancer staging</article-title>
          <source>Int J Comput Assist Radiol Surg</source>
          <year>2019</year>
          <month>06</month>
          <volume>14</volume>
          <issue>6</issue>
          <fpage>977</fpage>
          <lpage>986</lpage>
          <pub-id pub-id-type="doi">10.1007/s11548-019-01948-8</pub-id>
          <pub-id pub-id-type="medline">30891655</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11548-019-01948-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Gong</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Mask Scoring R-CNN</article-title>
          <year>2019</year>
          <conf-name>2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name>
          <conf-date>June 15-20, 2019</conf-date>
          <conf-loc>Long Beach, CA</conf-loc>
          <fpage>6402</fpage>
          <lpage>6411</lpage>
          <pub-id pub-id-type="doi">10.1109/CVPR.2019.00657</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Redlarski</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Palkowski</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Krawczuk</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Body surface area formulae: an alarming ambiguity</article-title>
          <source>Sci Rep</source>
          <year>2016</year>
          <month>06</month>
          <day>21</day>
          <volume>6</volume>
          <issue>1</issue>
          <fpage>27966</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/srep27966"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/srep27966</pub-id>
          <pub-id pub-id-type="medline">27323883</pub-id>
          <pub-id pub-id-type="pii">srep27966</pub-id>
          <pub-id pub-id-type="pmcid">PMC4914842</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rumpf</surname>
              <given-names>RW</given-names>
            </name>
            <name name-style="western">
              <surname>Stewart</surname>
              <given-names>WC</given-names>
            </name>
            <name name-style="western">
              <surname>Martinez</surname>
              <given-names>SK</given-names>
            </name>
            <name name-style="western">
              <surname>Gerrard</surname>
              <given-names>CY</given-names>
            </name>
            <name name-style="western">
              <surname>Adolphi</surname>
              <given-names>NL</given-names>
            </name>
            <name name-style="western">
              <surname>Thakkar</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Coleman</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rajab</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ray</surname>
              <given-names>WC</given-names>
            </name>
            <name name-style="western">
              <surname>Fabia</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Comparison of the Lund and Browder table to computed tomography scan three-dimensional surface area measurement for a pediatric cohort</article-title>
          <source>J Surg Res</source>
          <year>2018</year>
          <month>01</month>
          <volume>221</volume>
          <fpage>275</fpage>
          <lpage>284</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0022-4804(17)30535-8"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jss.2017.08.019</pub-id>
          <pub-id pub-id-type="medline">29229139</pub-id>
          <pub-id pub-id-type="pii">S0022-4804(17)30535-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cox</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kriho</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>De Klerk</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>van Dijk</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Rode</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Total body and hand surface area: Measurements, calculations, and comparisons in ethnically diverse children in South Africa</article-title>
          <source>Burns</source>
          <year>2017</year>
          <month>11</month>
          <volume>43</volume>
          <issue>7</issue>
          <fpage>1567</fpage>
          <lpage>1574</lpage>
          <pub-id pub-id-type="doi">10.1016/j.burns.2017.04.012</pub-id>
          <pub-id pub-id-type="medline">28473269</pub-id>
          <pub-id pub-id-type="pii">S0305-4179(17)30235-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dargan</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Mandal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Shokrollahi</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Hand burns surface area: A rule of thumb</article-title>
          <source>Burns</source>
          <year>2018</year>
          <month>08</month>
          <volume>44</volume>
          <issue>5</issue>
          <fpage>1346</fpage>
          <lpage>1351</lpage>
          <pub-id pub-id-type="doi">10.1016/j.burns.2018.02.011</pub-id>
          <pub-id pub-id-type="medline">29534883</pub-id>
          <pub-id pub-id-type="pii">S0305-4179(18)30093-7</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
