<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMI</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Med Inform</journal-id>
      <journal-title>JMIR Medical Informatics</journal-title>
      <issn pub-type="epub">2291-9694</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v10i8e37284</article-id>
      <article-id pub-id-type="pmid">35994311</article-id>
      <article-id pub-id-type="doi">10.2196/37284</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Interactive Medical Image Labeling Tool to Construct a Robust Convolutional Neural Network Training Data Set: Development and Validation Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Lovis</surname>
            <given-names>Christian</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Whiteley</surname>
            <given-names>Mark S</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Reifs</surname>
            <given-names>David</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Digital Care Research Group, Centre for Health and Social Care</institution>
            <institution>Universitat de Vic-Central University of Catalonia</institution>
            <addr-line>Carrer de la Sagrada Família, 7</addr-line>
            <addr-line>Vic, 08500</addr-line>
            <country>Spain</country>
            <phone>34 938861222</phone>
            <email>david.reifs@uvic.cat</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-2945-9803</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Reig-Bolaño</surname>
            <given-names>Ramon</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-7648-4502</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Casals</surname>
            <given-names>Marta</given-names>
          </name>
          <degrees>MSN</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-9591-5953</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Grau-Carrion</surname>
            <given-names>Sergi</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-8223-2398</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Digital Care Research Group, Centre for Health and Social Care</institution>
        <institution>Universitat de Vic-Central University of Catalonia</institution>
        <addr-line>Vic</addr-line>
        <country>Spain</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Hospital Santa Creu de Vic</institution>
        <addr-line>Vic</addr-line>
        <country>Spain</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: David Reifs <email>david.reifs@uvic.cat</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>8</month>
        <year>2022</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>22</day>
        <month>8</month>
        <year>2022</year>
      </pub-date>
      <volume>10</volume>
      <issue>8</issue>
      <elocation-id>e37284</elocation-id>
      <history>
        <date date-type="received">
          <day>14</day>
          <month>2</month>
          <year>2022</year>
        </date>
        <date date-type="rev-request">
          <day>24</day>
          <month>3</month>
          <year>2022</year>
        </date>
        <date date-type="rev-recd">
          <day>10</day>
          <month>5</month>
          <year>2022</year>
        </date>
        <date date-type="accepted">
          <day>31</day>
          <month>7</month>
          <year>2022</year>
        </date>
      </history>
      <copyright-statement>©David Reifs, Ramon Reig-Bolaño, Marta Casals, Sergi Grau-Carrion. Originally published in JMIR Medical Informatics (https://medinform.jmir.org), 22.08.2022.</copyright-statement>
      <copyright-year>2022</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Informatics, is properly cited. The complete bibliographic information, a link to the original publication on https://medinform.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://medinform.jmir.org/2022/8/e37284" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Skin ulcers are an important cause of morbidity and mortality everywhere in the world and occur due to several causes, including diabetes mellitus, peripheral neuropathy, immobility, pressure, arteriosclerosis, infections, and venous insufficiency. Ulcers are lesions that fail to undergo an orderly healing process and to produce functional and anatomical integrity in the expected time. In most cases, the methods of analysis used nowadays are rudimentary, which leads to errors and the use of invasive and uncomfortable techniques on patients. There are many studies that use a convolutional neural network to classify the different tissues in a wound. To obtain good results, the network must be trained with a data set correctly labeled by an expert in wound assessment. Typically, it is difficult to label pixel by pixel using professional photo editing software, as this requires extensive time and effort from a health professional.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>The aim of this paper is to implement a new, fast, and accurate method of labeling wound samples for training a neural network to classify different tissues.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We developed a support tool and evaluated its accuracy and reliability. We also compared the support tool classification with a digital gold standard (labeling the data with an image editing software).</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The obtained comparison between the gold standard and the proposed method was 0.9789 for background, 0.9842 for intact skin, 0.8426 for granulation tissue, 0.9309 for slough, and 0.9871 for necrotic. The tagging speed obtained was, on average, 2.6 times faster than that of an advanced image editing user.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>This method increases tagging speed by an average of 2.6 times compared to an advanced image editing user. This increase is greater with untrained users. The samples obtained with the new system are indistinguishable from the samples made with the gold standard.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>wound assessment</kwd>
        <kwd>pressure ulcers</kwd>
        <kwd>wound tissue classification</kwd>
        <kwd>labeling</kwd>
        <kwd>machine learning</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Skin ulcers are an important cause of morbidity and mortality everywhere in the world [<xref ref-type="bibr" rid="ref1">1</xref>] and occur due to several causes, including diabetes mellitus, peripheral neuropathy, immobility, pressure, arteriosclerosis, infections, and venous insufficiency. Ulcers are lesions that fail to undergo an orderly healing process and to produce functional and anatomical integrity in the expected time (4 weeks to 3 months) [<xref ref-type="bibr" rid="ref2">2</xref>]. This is usually due to an underlying pathology that prevents or delays healing. Ulcers have a major impact on the patient's life, causing a reduction in the quality of life in physical, emotional [<xref ref-type="bibr" rid="ref3">3</xref>], and social dimensions. Several contributing and confounding factors are associated with both the cause and maintenance of ulcers. In addition, care of these wounds requires the expenditure of human and material resources and generates a great economic impact [<xref ref-type="bibr" rid="ref4">4</xref>]. For these reasons, complex wounds such as ulcers are considered a major global problem.</p>
      <p>In most cases, the methods of analysis used nowadays are rudimentary, which leads to errors and the use of invasive and uncomfortable techniques for patients. It is extremely difficult to monitor [<xref ref-type="bibr" rid="ref5">5</xref>] the evolution of the wound based on the healing process as no data are stored or classified efficiently. Literature covering different algorithms focused on the detection and characterization of wounds is limited and mainly based on the capture of size and depth of the wounds [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref7">7</xref>]. There are many studies that use a convolutional neural network (CNN) to classify the different tissues in a wound [<xref ref-type="bibr" rid="ref8">8</xref>-<xref ref-type="bibr" rid="ref11">11</xref>]. However, the process of labeling the images for the training of a CNN in a supervised algorithm is hard work and requires extensive time and effort by a health professional.</p>
      <p>In current CNN training models, the labeling of the data set samples is a critical and important phase. In pretrained classification networks, images have been labeled using polygonal contour tools that help detect objects, parts of a body, animals, and so on [<xref ref-type="bibr" rid="ref12">12</xref>]. For tissue classification, more detailed labeling is required. A wound expert user will have to label the samples, typically using a professional photo editing software. Using the editing tools, this user will paint the different tissues of the wound with predetermined colors (eg, granulated in red, slough in yellow, necrotic in black, and intact skin in blue), pixel by pixel. At the end of the process, 2 files are obtained—1 with the original image and 1 modified with labels drawn with the editing software.</p>
      <p>The main goal of this work is to propose an interactive tool for labeling wound samples used for training a CNN to classify different tissues. With this interactive tool, the labeling process is faster, more efficient, and more accurate than with the current manual methods.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Materials</title>
        <p>The collection of the necessary data for labeling was made with a mobile app that uses a standard camera—in our case, a Samsung Galaxy S10 tablet. The data were collected in a health center by health care professionals.</p>
      </sec>
      <sec>
        <title>Ethics Approval</title>
        <p>The clinical protocol has been approved by the CEIC of the Hospital General de Vic (2019093/PR224).</p>
      </sec>
      <sec>
        <title>Proposal</title>
        <p>A proposed labeling tool is developed and presented in this study. The results of this application are used for training the CNN model (see the complete working framework in <xref rid="figure1" ref-type="fig">Figure 1</xref>). This tool is based on an image editor tool and allows for standard image editing actions such as zoom (<xref rid="figure2" ref-type="fig">Figure 2</xref>) and gamma correction (<xref rid="figure3" ref-type="fig">Figure 3</xref>). It uses computer vision techniques for tagging and labeling each tissue. </p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Generic overview of convolutional neural network (CNN) labeling, training, and inference process.</p>
          </caption>
          <graphic xlink:href="medinform_v10i8e37284_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Region selection to apply zoom (left) and the region zoomed (right).</p>
          </caption>
          <graphic xlink:href="medinform_v10i8e37284_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>The luminosity of the image can be modified by applying gamma correction. From left to right: original image, gamma value=0.5, and gamma value=2.</p>
          </caption>
          <graphic xlink:href="medinform_v10i8e37284_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>The interactive labeling tool can be divided into 2 working stages. In the first stage, the user can choose the part of the image of interest, using the mouse on the original image to define the region of interest (region to label). At the same time, the user can change the image parameters and hyperparameters of the automatic segmentation methods included in the tool.</p>
        <p>During the first stage, the tool suggests different partitions of the image the user can select based on which segments best suit the labeling objective and define their class (<xref rid="figure4" ref-type="fig">Figure 4</xref>). The partitions are calculated automatically, segmenting the image using computer vision methods and separating the different elements. When the user zooms in on parts of the image to be able to increase the precision in complex areas, the segmentation algorithm recalculates over the zoomed section (<xref rid="figure5" ref-type="fig">Figure 5</xref>). The user can also change the hyperparameters (parameters whose value is used to control the algorithm) of the segmentation algorithms to recalculate the partitions and get new proposals (<xref rid="figure6" ref-type="fig">Figure 6</xref>).</p>
        <p>In the second stage, the user will use the segmentations proposed by the tool to select those that best fit the clinical criteria for tissue classification. The user can make use of sections from different proposals. As the user selects the segmentations, the final labeled image will be drawn in the <italic>Mask</italic> section (<xref rid="figure4" ref-type="fig">Figure 4</xref>).</p>
        <p>Although the proposed tool allows a desired number of tissues to be tagged, this study was based on the hypothesis of labeling 5 types of tissues: intact skin, slough, necrotic, granulated, and background (or no skin). For this reason, only comparisons between these tissue labels will appear in the results presented.</p>
        <p>The segmentation process is based on superpixels and clustering methodologies. It uses different configurations of superpixels and clustering to receive different segmentations of the input image. The resulting segmentations are shown to the user to select the partitions that are closest to the tissue distributions.</p>
        <p>In addition, the app has 2 different tools for manual image editing (<xref rid="figure7" ref-type="fig">Figure 7</xref>). These tools allow for the correction of mislabeled regions, thus improving the quality of the edges or ambiguous regions hard to segment automatically. The first tool is a brush that allows the user to paint the image using the cursor. The second tool is equivalent to the “magic wand” tool where selecting a pixel in the image causes all the adjacent similar pixels under a threshold to be automatically selected as well.</p>
        <p>At the end of the process, the user can obtain a final labeled image where each pixel value is related to the class of the corresponding pixel in the original image (<xref rid="figure8" ref-type="fig">Figure 8</xref>).</p>
        <p>As mentioned before, the tool uses different computer vision methods based on superpixels (techniques 1, 2, and 3 below) and clustering (technique 4 below). Superpixels are an aggregation of pixels according to similar characteristics between them, such as raw pixel intensity. There are different algorithms and criteria used to measure the similarity between pixels. Clustering is an unsupervised machine learning technique that involves the grouping of data points in a different number of clusters according to the similarity between them.</p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>Main menu view. Left options: brush, wand, back, gamma, quick, Felzenszwalb (FZ), N clusters, and simple linear iterative clustering (Slic). Right options: red (R), yellow (Y), orange (O), black (B), gray (G), blue, move (mv), save, and close.</p>
          </caption>
          <graphic xlink:href="medinform_v10i8e37284_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>Recalculated partitions from a zoom in the original image. Left options: brush, wand, back, gamma, quick, Felzenszwalb (FZ), N clusters, and simple linear iterative clustering (Slic). Right options: red (R), yellow (Y), orange (O), black (B), gray (G), blue, move (mv), save, and close.</p>
          </caption>
          <graphic xlink:href="medinform_v10i8e37284_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure6" position="float">
          <label>Figure 6</label>
          <caption>
            <p>Example of hyperparameters, from left to right: simple linear iterative clustering (SLIC) segmentation with 30 clusters and SLIC segmentation with 100 clusters.</p>
          </caption>
          <graphic xlink:href="medinform_v10i8e37284_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure7" position="float">
          <label>Figure 7</label>
          <caption>
            <p>Manual editing tools to classify pixels. RGB: an additive color model with primary colors (red, green, and blue); Std: standard deviation.</p>
          </caption>
          <graphic xlink:href="medinform_v10i8e37284_fig7.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure8" position="float">
          <label>Figure 8</label>
          <caption>
            <p>From left to right: original image and labeled image. The classified tissues are intact skin (green), slough (yellow), granulated (red), and background (blue). In this case, there is no presence of necrotic.</p>
          </caption>
          <graphic xlink:href="medinform_v10i8e37284_fig8.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <sec>
          <title>Technique 1: Felzenszwalb Efficient Graph-Based Segmentation</title>
          <p>Based on superpixels, this technique is a graph-based approach to segmentation [<xref ref-type="bibr" rid="ref13">13</xref>]. The goal was to develop a computational approach to image segmentation that is broadly useful, much in the way that other low-level techniques such as edge detection are used in a wide range of computer vision tasks. This technique connects elements of the graph according to similarity criteria and a greedy algorithm (<xref rid="figure9" ref-type="fig">Figure 9</xref>) to make the boundaries between the different segments more evident.</p>
          <p>The similarity criterion used is the <italic>Pairwise Region Comparison Predicate</italic>. This predicate is based on measuring the dissimilarity between elements along the boundary of the 2 components. The difference between the 2 components is defined by the minimum weight edge connecting them together.</p>
          <fig id="figure9" position="float">
            <label>Figure 9</label>
            <caption>
              <p>Felzenszwalb segmentation.</p>
            </caption>
            <graphic xlink:href="medinform_v10i8e37284_fig9.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>Technique 2: Quickshift Image Segmentation</title>
          <p>This technique uses a “Mean-shift” [<xref ref-type="bibr" rid="ref14">14</xref>] algorithm that segments an RGB (red, green, and blue primary colors) image (or any image with more than one channel) by identifying clusters of pixels in the joint spatial and color dimensions. Segments are local (superpixels) and can be used as a basis for further processing. The cluster approach is carried out over a 5D space defined by the L,a,b values of the CIELAB color space (defined by the CIE, the International Commission on Illumination) and the x,y pixel coordinates (<xref rid="figure10" ref-type="fig">Figure 10</xref>).</p>
          <p>Mean-shift is a mode-seeking algorithm that generates image segments by recursively moving to the kernel-smoothed centroid for every data point in the pixel feature space, effectively performing a gradient ascent. The generated segments or superpixels can be large or small based on the input kernel parameters, but there is no direct control over the number, size, or compactness of the resulting superpixels.</p>
          <fig id="figure10" position="float">
            <label>Figure 10</label>
            <caption>
              <p>Quickshift segmentation.</p>
            </caption>
            <graphic xlink:href="medinform_v10i8e37284_fig10.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>Technique 3: Simple Linear Iterative Clustering Superpixels</title>
          <p>This technique’s algorithm [<xref ref-type="bibr" rid="ref15">15</xref>] consists of simple linear iterative clustering, performing a local clustering of pixels in the 5D space defined by the L,a,b values of the CIELAB color space and the x,y pixel coordinates (<xref rid="figure11" ref-type="fig">Figure 11</xref>).</p>
          <p>For simple linear iterative clustering, each pixel in the image is associated with the nearest cluster center whose search area overlaps this pixel. After all the pixels are associated with the nearest cluster center, a new center is computed as the average labxy vector of all the pixels belonging to the cluster. We then iteratively repeat the process of associating pixels with the nearest cluster center and recomputing the cluster center until convergence.</p>
          <fig id="figure11" position="float">
            <label>Figure 11</label>
            <caption>
              <p>Simple linear iterative clustering (SLIC) segmentation.</p>
            </caption>
            <graphic xlink:href="medinform_v10i8e37284_fig11.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>Technique 4: K-Means Image Segmentation</title>
          <p>K-means [<xref ref-type="bibr" rid="ref16">16</xref>] is a clustering method used to divide a set of data into a specific number of groups. For image segmentation, the clusters are calculated by raw pixel intensities. Image pixels are associated with the nearest centroid using Euclidean distance as a similarity measure (<xref rid="figure12" ref-type="fig">Figure 12</xref>).</p>
          <fig id="figure12" position="float">
            <label>Figure 12</label>
            <caption>
              <p>K-means segmentation.</p>
            </caption>
            <graphic xlink:href="medinform_v10i8e37284_fig12.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <p>To evaluate this proposed method, we compared the results obtained by the proposed tool and the results obtained by wound experts using manual segmentation. The manual segmentation was carried out using Gimp, a free cross-platform image editing software, and the experts classified each label pixel by pixel.</p>
      <p>Specifically, we compared the time used to classify the wound images in each method and the accuracy of our method against the manual one.</p>
      <sec>
        <title>Time Evaluation</title>
        <p><xref ref-type="table" rid="table1">Table 1</xref> shows the time employed to label each one of the data set samples using the gold standard method versus the proposed method. With the proposed method, the image tagging speed is increased by an average of 2.6 times.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Comparison of the time employed to label each sample of the data set with the 2 referred methods, and the speedup achieved with the proposed method; time notation in minutes and seconds (mm:ss).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="210"/>
            <col width="290"/>
            <col width="280"/>
            <col width="220"/>
            <thead>
              <tr valign="top">
                <td>Sample</td>
                <td>Manual method (time)</td>
                <td>New method (time)</td>
                <td>Speedup achieved</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>1</td>
                <td>10:30</td>
                <td>2:47</td>
                <td>3.7x</td>
              </tr>
              <tr valign="top">
                <td>2</td>
                <td>05:35</td>
                <td>2:30</td>
                <td>2.2x</td>
              </tr>
              <tr valign="top">
                <td>3</td>
                <td>07:30</td>
                <td>2:06</td>
                <td>3.5x</td>
              </tr>
              <tr valign="top">
                <td>4</td>
                <td>09:15</td>
                <td>4:11</td>
                <td>2.2x</td>
              </tr>
              <tr valign="top">
                <td>5</td>
                <td>06:30</td>
                <td>4:42</td>
                <td>1.3x</td>
              </tr>
              <tr valign="top">
                <td>6</td>
                <td>13:24</td>
                <td>5:38</td>
                <td>2.3x</td>
              </tr>
              <tr valign="top">
                <td>7</td>
                <td>03:54</td>
                <td>0:41</td>
                <td>5.7x</td>
              </tr>
              <tr valign="top">
                <td>8</td>
                <td>03:02</td>
                <td>1:16</td>
                <td>2.3x</td>
              </tr>
              <tr valign="top">
                <td>9</td>
                <td>02:44</td>
                <td>2:09</td>
                <td>1.2x</td>
              </tr>
              <tr valign="top">
                <td>10</td>
                <td>07:06</td>
                <td>1:29</td>
                <td>4.7x</td>
              </tr>
              <tr valign="top">
                <td>11</td>
                <td>04:20</td>
                <td>1:30</td>
                <td>2.8x</td>
              </tr>
              <tr valign="top">
                <td>12</td>
                <td>04:42</td>
                <td>1:25</td>
                <td>3.3x</td>
              </tr>
              <tr valign="top">
                <td>13</td>
                <td>03:05</td>
                <td>1:01</td>
                <td>3.0x</td>
              </tr>
              <tr valign="top">
                <td>14</td>
                <td>06:37</td>
                <td>4:02</td>
                <td>1.6x</td>
              </tr>
              <tr valign="top">
                <td>15</td>
                <td>03:21</td>
                <td>1:15</td>
                <td>2.6x</td>
              </tr>
              <tr valign="top">
                <td>16</td>
                <td>02:49</td>
                <td>1:38</td>
                <td>1.7x</td>
              </tr>
              <tr valign="top">
                <td>17</td>
                <td>03:18</td>
                <td>1:35</td>
                <td>2.0x</td>
              </tr>
              <tr valign="top">
                <td>18</td>
                <td>05:07</td>
                <td>1:48</td>
                <td>2.8x</td>
              </tr>
              <tr valign="top">
                <td>19</td>
                <td>03:59</td>
                <td>2:50</td>
                <td>1.4x</td>
              </tr>
              <tr valign="top">
                <td>20</td>
                <td>03:17</td>
                <td>1:14</td>
                <td>2.6x</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Similarity</title>
        <p>Precision, recall, and <italic>F</italic>-score measures are used to evaluate the accuracy of labeling algorithms. The image obtained with the gold standard is taken as ground truth. When tagging an image, it is to be expected that the result obtained will be slightly different each time, even if the same tool and the same criteria are used. It is necessary to be able to evaluate whether the samples labeled with the new method are as similar to the gold standard reference samples as would be other samples made with the same method. Therefore, we relabeled all the gold standard samples to compare the quality of the similarity obtained. The exact correlation between gold standard and new labeling method would be 1.0 (<xref ref-type="table" rid="table2">Tables 2</xref> and <xref ref-type="table" rid="table3">3</xref>).</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Comparison between the gold standard and the proposed labeling method.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="270"/>
            <col width="230"/>
            <col width="250"/>
            <col width="250"/>
            <thead>
              <tr valign="top">
                <td>Tissue</td>
                <td>Precision</td>
                <td>Recall</td>
                <td><italic>F</italic>-score</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>No skin (background)</td>
                <td>0.9789</td>
                <td>0.9824</td>
                <td>0.9804</td>
              </tr>
              <tr valign="top">
                <td>Intact skin</td>
                <td>0.9842</td>
                <td>0.9867</td>
                <td>0.9854</td>
              </tr>
              <tr valign="top">
                <td>Granular</td>
                <td>0.8426</td>
                <td>0.9157</td>
                <td>0.8753</td>
              </tr>
              <tr valign="top">
                <td>Base</td>
                <td>0.9309</td>
                <td>0.8492</td>
                <td>0.8838</td>
              </tr>
              <tr valign="top">
                <td>Necrotic</td>
                <td>0.9871</td>
                <td>0.7362</td>
                <td>0.8387</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <table-wrap position="float" id="table3">
          <label>Table 3</label>
          <caption>
            <p>Comparison between the gold standard method samples.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="270"/>
            <col width="230"/>
            <col width="250"/>
            <col width="250"/>
            <thead>
              <tr valign="top">
                <td>Tissue</td>
                <td>Precision</td>
                <td>Recall</td>
                <td><italic>F</italic>-score</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>No skin (background)</td>
                <td>0.9919</td>
                <td>0.9921</td>
                <td>0.9919</td>
              </tr>
              <tr valign="top">
                <td>Intact skin</td>
                <td>0.9938</td>
                <td>0.9912</td>
                <td>0.9925</td>
              </tr>
              <tr valign="top">
                <td>Granular</td>
                <td>0.8265</td>
                <td>0.9377</td>
                <td>0.8730</td>
              </tr>
              <tr valign="top">
                <td>Base</td>
                <td>0.9172</td>
                <td>0.8821</td>
                <td>0.8932</td>
              </tr>
              <tr valign="top">
                <td>Necrotic</td>
                <td>0.9771</td>
                <td>0.7622</td>
                <td>0.8481</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <p>Precision is the relationship between the correctly predicted positive observations and the total predicted positive observations. This metric determines how many pixels match out of all the pixels labeled as specific tissue. High precision is related to the low rate of false positives.</p>
        <p>Recall, or sensitivity, is the relationship between the correctly predicted positive observations and all positive observations of actual class. This metric determines how many pixels, out of all the pixels that truly matched, were labeled.</p>
        <p><italic>F</italic>-score provides a single score that balances the concerns of both precision and recall in one value. Therefore, this score considers both false positives and false negatives.</p>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>By analyzing the difference between images labeled with the 2 methods, we see that the discrepancies are found at the edges of the labeling (<xref rid="figure13" ref-type="fig">Figure 13</xref>).</p>
        <p>This observation is especially relevant for the evaluation of the smallest elements, where the area or perimeter ratio is more significant and can affect the evaluation of similarity. Likewise, any discrepancy of criteria that may exist in the labeling will affect the minority classes to a greater extent. The majority of the classes (no skin and intact skin) have higher <italic>F</italic>-score values than the rest of the classes.</p>
        <p>Evaluating <xref ref-type="table" rid="table2">Tables 2</xref> and <xref ref-type="table" rid="table3">3</xref>, we see that the results obtained with the 2 methods are highly similar, with almost no difference between the comparison of the labels.</p>
        <fig id="figure13" position="float">
          <label>Figure 13</label>
          <caption>
            <p>From left to right: examples of original image, labeled image with digital method, labeled with gold standard method, and differences between methods.</p>
          </caption>
          <graphic xlink:href="medinform_v10i8e37284_fig13.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>The proposed method increases tagging speed by an average factor of 2.6 compared to an advanced image editing user. This gain is larger with untrained users.</p>
        <p>The samples obtained with the proposed system are indistinguishable from the samples made with the gold standard.</p>
        <p>The incorporation of this type of algorithm will undoubtedly shorten the time required for training a tissue classification network. It provides a tool that can be used by any clinician regardless of their level of knowledge of photo editing. As such, it makes training and using the neural network approach accessible to all in a practical and fast way.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">CIELAB</term>
          <def>
            <p>International Commission on Illumination (CIE) L*a*b* color space</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">CNN</term>
          <def>
            <p>convolutional neural network</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>We appreciate the collaboration and assistance by the members of the Wound Care department of Hospital de la Santa Creu de Vic who responded to the implementation, assessment, and validation of this new method in their organization. This work has been carried out within the framework of the doctoral program of the University of Vic—Central University of Catalonia.</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lazarus</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Valle</surname>
              <given-names>MF</given-names>
            </name>
            <name name-style="western">
              <surname>Malas</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Qazi</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Maruthur</surname>
              <given-names>NM</given-names>
            </name>
            <name name-style="western">
              <surname>Doggett</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Fawole</surname>
              <given-names>OA</given-names>
            </name>
            <name name-style="western">
              <surname>Bass</surname>
              <given-names>EB</given-names>
            </name>
            <name name-style="western">
              <surname>Zenilman</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Chronic venous leg ulcer treatment: future research needs</article-title>
          <source>Wound Repair Regen</source>
          <year>2014</year>
          <month>10</month>
          <day>17</day>
          <volume>22</volume>
          <issue>1</issue>
          <fpage>34</fpage>
          <lpage>42</lpage>
          <pub-id pub-id-type="doi">10.1111/wrr.12102</pub-id>
          <pub-id pub-id-type="medline">24134795</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Coerper</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Beckert</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Küper</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Jekov</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Königsrainer</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Fifty percent area reduction after 4 weeks of treatment is a reliable indicator for healing--analysis of a single-center cohort of 704 diabetic patients</article-title>
          <source>J Diabetes Complications</source>
          <year>2009</year>
          <month>1</month>
          <volume>23</volume>
          <issue>1</issue>
          <fpage>49</fpage>
          <lpage>53</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jdiacomp.2008.02.001</pub-id>
          <pub-id pub-id-type="medline">18394932</pub-id>
          <pub-id pub-id-type="pii">S1056-8727(08)00023-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Platsidaki</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Kouris</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Christodoulou</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Psychosocial aspects in patients with chronic leg ulcers</article-title>
          <source>Wounds</source>
          <year>2017</year>
          <month>10</month>
          <day>03</day>
          <volume>29</volume>
          <issue>10</issue>
          <fpage>306</fpage>
          <lpage>310</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://www.woundsresearch.com/article/psychosocial-aspects-patients-chronic-leg-ulcers"/>
          </comment>
          <pub-id pub-id-type="doi">10.25270/wnds/2017.10.306310</pub-id>
          <pub-id pub-id-type="medline">29091039</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nussbaum</surname>
              <given-names>SR</given-names>
            </name>
            <name name-style="western">
              <surname>Carter</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Fife</surname>
              <given-names>CE</given-names>
            </name>
            <name name-style="western">
              <surname>DaVanzo</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Haught</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Nusgart</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Cartwright</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>An economic evaluation of the impact, cost, and Medicare policy implications of chronic nonhealing wounds</article-title>
          <source>Value Health</source>
          <year>2018</year>
          <month>01</month>
          <volume>21</volume>
          <issue>1</issue>
          <fpage>27</fpage>
          <lpage>32</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1098-3015(17)30329-7"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jval.2017.07.007</pub-id>
          <pub-id pub-id-type="medline">29304937</pub-id>
          <pub-id pub-id-type="pii">S1098-3015(17)30329-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Veredas</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Mesa</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Morente</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Binary tissue classification on wound images with neural networks and bayesian classifiers</article-title>
          <source>IEEE Trans Med Imaging</source>
          <year>2010</year>
          <month>02</month>
          <volume>29</volume>
          <issue>2</issue>
          <fpage>410</fpage>
          <lpage>27</lpage>
          <pub-id pub-id-type="doi">10.1109/TMI.2009.2033595</pub-id>
          <pub-id pub-id-type="medline">19825516</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Restrepo-Medrano</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Verdú</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Medida de la cicatrización en úlceras por presión: ¿Con qué contamos?</article-title>
          <source>Gerokomos</source>
          <year>2011</year>
          <month>03</month>
          <volume>22</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>252</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://gneaupp.info/wp-content/uploads/2014/12/40_pdf.pdf"/>
          </comment>
          <pub-id pub-id-type="doi">10.4321/s1134-928x2011000100006</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Restrepo-Medrano</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Verdú Soriano</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Desarrollo de un índice de medida de la evolución hacia la cicatrización de las heridas crónicas</article-title>
          <source>Gerokomos</source>
          <year>2011</year>
          <month>12</month>
          <volume>22</volume>
          <issue>4</issue>
          <fpage>176</fpage>
          <lpage>183</lpage>
          <pub-id pub-id-type="doi">10.4321/s1134-928x2011000400005</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zahia</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sierra-Sosa</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Garcia-Zapirain</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Elmaghraby</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Tissue classification and segmentation of pressure injuries using convolutional neural networks</article-title>
          <source>Comput Methods Programs Biomed</source>
          <year>2018</year>
          <month>06</month>
          <volume>159</volume>
          <fpage>51</fpage>
          <lpage>58</lpage>
          <pub-id pub-id-type="doi">10.1016/j.cmpb.2018.02.018</pub-id>
          <pub-id pub-id-type="medline">29650318</pub-id>
          <pub-id pub-id-type="pii">S0169-2607(17)31486-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lucas</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Niri</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Treuillet</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Douzi</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Castaneda</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Wound size imaging: ready for smart assessment and monitoring</article-title>
          <source>Adv Wound Care (New Rochelle)</source>
          <year>2021</year>
          <month>11</month>
          <day>01</day>
          <volume>10</volume>
          <issue>11</issue>
          <fpage>641</fpage>
          <lpage>661</lpage>
          <pub-id pub-id-type="doi">10.1089/wound.2018.0937</pub-id>
          <pub-id pub-id-type="medline">32320356</pub-id>
          <pub-id pub-id-type="pmcid">PMC8392100</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Müller-Linow</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wilhelm</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Briese</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Wojciechowski</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Schurr</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Fiorani</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Plant Screen Mobile: an open-source mobile device app for plant trait analysis</article-title>
          <source>Plant Methods</source>
          <year>2019</year>
          <month>1</month>
          <day>11</day>
          <volume>15</volume>
          <issue>1</issue>
          <fpage>2</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://plantmethods.biomedcentral.com/articles/10.1186/s13007-019-0386-z"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s13007-019-0386-z</pub-id>
          <pub-id pub-id-type="medline">30651749</pub-id>
          <pub-id pub-id-type="pii">386</pub-id>
          <pub-id pub-id-type="pmcid">PMC6329080</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Reifs</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Angosto</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Fernandez</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Grau</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Reig-Bolaño</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Tissue segmentation for automatic chronic wound assessment</article-title>
          <source>Front. Artif. Intell. Appl</source>
          <year>2019</year>
          <volume>319</volume>
          <fpage>381</fpage>
          <lpage>384</lpage>
          <pub-id pub-id-type="doi">10.3233/FAIA190149</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wei</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Xia</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ni</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yan</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>HCP: a flexible CNN framework for multi-label image classification</article-title>
          <source>IEEE Trans. Pattern Anal. Mach. Intell</source>
          <year>2016</year>
          <month>9</month>
          <day>1</day>
          <volume>38</volume>
          <issue>9</issue>
          <fpage>1901</fpage>
          <lpage>1907</lpage>
          <pub-id pub-id-type="doi">10.1109/tpami.2015.2491929</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Felzenszwalb</surname>
              <given-names>PF</given-names>
            </name>
            <name name-style="western">
              <surname>Huttenlocher</surname>
              <given-names>DP</given-names>
            </name>
          </person-group>
          <article-title>Efficient graph-based image segmentation</article-title>
          <source>International Journal of Computer Vision</source>
          <year>2004</year>
          <month>09</month>
          <volume>59</volume>
          <issue>2</issue>
          <fpage>167</fpage>
          <lpage>181</lpage>
          <pub-id pub-id-type="doi">10.1023/b:visi.0000022288.19776.77</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Comaniciu</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Meer</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Mean shift: a robust approach toward feature space analysis</article-title>
          <source>IEEE Trans. Pattern Anal. Machine Intell</source>
          <year>2002</year>
          <volume>24</volume>
          <issue>5</issue>
          <fpage>603</fpage>
          <lpage>619</lpage>
          <pub-id pub-id-type="doi">10.1109/34.1000236</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Achanta</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Shaji</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Lucchi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Fua</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Süsstrunk</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>SLIC superpixels compared to state-of-the-art superpixel methods</article-title>
          <source>IEEE Trans Pattern Anal Mach Intell</source>
          <year>2012</year>
          <month>11</month>
          <volume>34</volume>
          <issue>11</issue>
          <fpage>2274</fpage>
          <lpage>82</lpage>
          <pub-id pub-id-type="doi">10.1109/TPAMI.2012.120</pub-id>
          <pub-id pub-id-type="medline">22641706</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lloyd</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Least squares quantization in PCM</article-title>
          <source>IEEE Trans. Inform. Theory</source>
          <year>1982</year>
          <month>03</month>
          <volume>28</volume>
          <issue>2</issue>
          <fpage>129</fpage>
          <lpage>137</lpage>
          <pub-id pub-id-type="doi">10.1109/tit.1982.1056489</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
