<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Med Inform</journal-id><journal-id journal-id-type="publisher-id">medinform</journal-id><journal-id journal-id-type="index">7</journal-id><journal-title>JMIR Medical Informatics</journal-title><abbrev-journal-title>JMIR Med Inform</abbrev-journal-title><issn pub-type="epub">2291-9694</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v13i1e62774</article-id><article-id pub-id-type="doi">10.2196/62774</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Convolutional Neural Network Models for Visual Classification of Pressure Ulcer Stages: Cross-Sectional Study</article-title></title-group><contrib-group><contrib contrib-type="author"><name name-style="western"><surname>Lei</surname><given-names>Changbin</given-names></name><degrees>MSN</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Jiang</surname><given-names>Yan</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Xu</surname><given-names>Ke</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Liu</surname><given-names>Shanshan</given-names></name><degrees>MSN</degrees><xref ref-type="aff" 
rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Cao</surname><given-names>Hua</given-names></name><degrees>BSN</degrees><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Wang</surname><given-names>Cong</given-names></name><degrees>MSN</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib></contrib-group><aff id="aff1"><institution>Trauma Center, West China Hospital, West China School of Nursing, Sichuan University</institution><addr-line>Chengdu</addr-line><country>China</country></aff><aff id="aff2"><institution>Nursing Department, Evidence-Based Nursing Center, West China Hospital, Sichuan University</institution><addr-line>Chengdu</addr-line><country>China</country></aff><aff id="aff3"><institution>Evidence-Based Nursing Center, West China Hospital, Sichuan University</institution><addr-line>Chengdu</addr-line><country>China</country></aff><aff id="aff4"><institution>Neurosurgery Department, West China Hospital, Sichuan University</institution><addr-line>Chengdu</addr-line><country>China</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Lovis</surname><given-names>Christian</given-names></name></contrib><contrib contrib-type="editor"><name name-style="western"><surname>Chen</surname><given-names>Qingyu</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Prabhakar</surname><given-names>C J</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Tang</surname><given-names>Yunchao</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Yan Jiang, PhD, Nursing Department, Evidence-Based Nursing Center, West China Hospital, Sichuan University, 37# Guoxue Alley, Chengdu, 610041, China, 86 18980601511; 
<email>hxhljy2018@163.com</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>25</day><month>3</month><year>2025</year></pub-date><volume>13</volume><elocation-id>e62774</elocation-id><history><date date-type="received"><day>03</day><month>06</month><year>2024</year></date><date date-type="rev-recd"><day>21</day><month>10</month><year>2024</year></date><date date-type="accepted"><day>11</day><month>01</month><year>2025</year></date></history><copyright-statement>&#x00A9; Changbin Lei, Yan Jiang, Ke Xu, Shanshan Liu, Hua Cao, Cong Wang. Originally published in JMIR Medical Informatics (<ext-link ext-link-type="uri" xlink:href="https://medinform.jmir.org">https://medinform.jmir.org</ext-link>), 25.3.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Informatics, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://medinform.jmir.org/">https://medinform.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://medinform.jmir.org/2025/1/e62774"/><abstract><sec><title>Background</title><p>Pressure injuries (PIs) pose a negative health impact and a substantial economic burden on patients and society. Accurate staging is crucial for treating PIs. 
Owing to the diversity in the clinical manifestations of PIs and the lack of objective biochemical and pathological examinations, accurate staging of PIs is a major challenge. The deep learning algorithm, which uses convolutional neural networks (CNNs), has demonstrated exceptional classification performance in the intricate domain of skin diseases and wounds and has the potential to improve the staging accuracy of PIs.</p></sec><sec><title>Objective</title><p>We explored the potential of applying AlexNet, VGGNet16, ResNet18, and DenseNet121 to PI staging, aiming to provide an effective tool to assist in staging.</p></sec><sec sec-type="methods"><title>Methods</title><p>PI images from patients&#x2014;including those with stage I, stage II, stage III, stage IV, unstageable, and suspected deep tissue injury (SDTI)&#x2014;were collected at a tertiary hospital in China. Additionally, we augmented the PI data by cropping and flipping the PI images 9 times. The collected images were then divided into training, validation, and test sets at a ratio of 8:1:1. We subsequently trained them via AlexNet, VGGNet16, ResNet18, and DenseNet121 to develop staging models.</p></sec><sec sec-type="results"><title>Results</title><p>We collected 853 raw PI images with the following distributions across stages: stage I (n=148), stage II (n=121), stage III (n=216), stage IV (n=110), unstageable (n=128), and SDTI (n=130). A total of 7677 images were obtained after data augmentation. Among all the CNN models, DenseNet121 demonstrated the highest overall accuracy of 93.71%. The classification performances of AlexNet, VGGNet16, and ResNet18 exhibited overall accuracies of 87.74%, 82.42%, and 92.42%, respectively.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>The CNN-based models demonstrated strong classification ability for PI images, which might promote highly efficient, intelligent PI staging methods. 
In the future, the models can be compared with nurses with different levels of experience to further verify the clinical application effect.</p></sec></abstract><kwd-group><kwd>pressure ulcer</kwd><kwd>deep learning</kwd><kwd>artificial intelligence</kwd><kwd>neural network</kwd><kwd>CNN</kwd><kwd>machine learning</kwd><kwd>image</kwd><kwd>imaging</kwd><kwd>classification</kwd><kwd>ulcer</kwd><kwd>sore</kwd><kwd>pressure</kwd><kwd>wound</kwd><kwd>skin</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Pressure injuries (PIs), also known as pressure ulcers, are prevalent in health care settings and result from sustained pressure or shear forces on bony prominences and soft tissues [<xref ref-type="bibr" rid="ref1">1</xref>]. A meta-analysis encompassing 2,579,049 hospitalized adult patients revealed a 12.8% prevalence of PIs, and the estimated annual total cost of managing PIs increased approximately several times even in high-income countries [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref3">3</xref>]. Accurate staging of PIs is essential for determining appropriate treatment protocols and predicting clinical outcomes. Traditional staging relies on subjective evaluation by health care providers, often wound nurses, who use guidelines such as the <italic>Prevention and Treatment of Pressure Ulcers/Injuries: Clinical Practice Guideline, International Guideline 2019</italic>, and divide the PI stage into 6 stages&#x2014;namely, stages I, II, III, and IV, unstageable, and suspected deep tissue injury (SDTI)&#x2014;based on visual assessment, such as the color and texture characteristics of the wound bed, wound edges, and skin around the wound [<xref ref-type="bibr" rid="ref4">4</xref>].</p><p>However, visual assessment conducted by medical staff is subject to considerable interrater and intrarater variability [<xref ref-type="bibr" rid="ref5">5</xref>]. 
Moreover, the staging results are significantly influenced by the level of wound knowledge and the clinical experience of the evaluators. It has been reported that only 23%-58% of medical staff correctly classify PIs [<xref ref-type="bibr" rid="ref6">6</xref>]. Therefore, accurate staging of PIs is still a great challenge, and a universal, reliable, and more objective staging system is urgently needed.</p><p>Convolutional neural networks (CNNs) offer an opportunity to increase the objectivity and accuracy of PI staging. CNNs, a subset of deep learning (DL) models, have shown remarkable potential in medical image analysis [<xref ref-type="bibr" rid="ref7">7</xref>]. CNNs automatically extract and learn hierarchical features from grid-like data, such as images, and have achieved performance levels comparable to or surpassing those of human experts in various medical imaging domains [<xref ref-type="bibr" rid="ref8">8</xref>]. In dermatological and wound imaging, CNNs have demonstrated promising results, matching or even exceeding the diagnostic accuracy of dermatologists in classifying skin cancer and other skin lesions [<xref ref-type="bibr" rid="ref9">9</xref>]. On the basis of the good classification performance of CNNs in medical images and the increased clinical need for automated staging of PIs, in recent years, the use of CNNs to learn PI images has gradually developed, but there are also areas for improvement. For example, the use of a singular model had an accuracy rate below 90%, alongside images sourced from outdated databases and of poor quality, and the labeling of PI images often relies on the subjective judgment of clinical experts, so the quality and consistency of the images were uneven in these studies, which limit the evidence supporting the model&#x2019;s widespread applicability [<xref ref-type="bibr" rid="ref10">10</xref>-<xref ref-type="bibr" rid="ref12">12</xref>]. 
Therefore, further exploration is needed in this field to prove the effectiveness of CNNs.</p><p>In this study, we trained AlexNet, VGGNet16, ResNet18, and DenseNet121 to classify PI images with the aim of contributing to artificial intelligence&#x2013;driven wound care knowledge and informing the development of advanced clinical tools.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Setting and Samples</title><p>Our study was conducted at a tertiary hospital in Chengdu, China, from March 1, 2022, to September 30, 2024.</p><p>The inclusion criteria for patients were as follows: (1) PIs were diagnosed by 2 wound therapists simultaneously on the basis of the <italic>Prevention and Treatment of Pressure Ulcers/Injuries: Clinical Practice Guideline, International Guideline 2019</italic> [<xref ref-type="bibr" rid="ref4">4</xref>] and (2) the patients provided informed consent.</p><p>The exclusion criteria for patients were as follows: (1) PIs included other skin diseases, such as incontinence dermatitis; (2) PIs were covered by dressings or tattoos; (3) exudation was excessive, resulting in obvious reflection; (4) mucosal PIs; (5) the patient&#x2019;s vital signs remained too unstable to exchange positions; and (6) the patients were in isolation wards.</p><p>The inclusion criteria for images were as follows: (1) images were taken by the investigator, and (2) images fully presented the wound triangle, namely, the wound bed, wound edge, and surrounding wound tissue.</p><p>The exclusion criteria for images were as follows: (1) images containing nonwounded and skin tissues such as clothing and sheets and (2) images that were blurred and overexposed.</p></sec><sec id="s2-2"><title>Dataset</title><p>We recruited wound therapists who were awarded an international ostomy wound certificate to act as PI staging evaluators. Five wound therapists were included in this study. 
The basic information is provided in <xref ref-type="table" rid="table1">Table 1</xref>. To test the consistency of the 5 wound therapists&#x2019; assessments, we sent them 30 PI images from the NPIAP (National Pressure Injury Advisory Panel) website in the form of a questionnaire [<xref ref-type="bibr" rid="ref13">13</xref>]. The Fleiss kappa coefficient was 0.856, <italic>P</italic>&#x003C;.001, indicating strong staging consistency among them [<xref ref-type="bibr" rid="ref14">14</xref>]. The staging results are shown in <xref ref-type="table" rid="table2">Tables 2</xref> and <xref ref-type="table" rid="table3">3</xref>.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Basic information of the 5 wound therapists.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Order</td><td align="left" valign="bottom">Gender</td><td align="left" valign="bottom">Age (years)</td><td align="left" valign="bottom">Working years</td><td align="left" valign="bottom">Department</td></tr></thead><tbody><tr><td align="left" valign="top">A</td><td align="left" valign="top">Female</td><td align="char" char="." valign="top">42</td><td align="char" char="." valign="top">20</td><td align="left" valign="top">Respiratory ICU<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td></tr><tr><td align="left" valign="top">B</td><td align="left" valign="top">Female</td><td align="char" char="." valign="top">36</td><td align="char" char="." valign="top">15</td><td align="left" valign="top">Neurology</td></tr><tr><td align="left" valign="top">C</td><td align="left" valign="top">Female</td><td align="char" char="." valign="top">40</td><td align="char" char="." valign="top">18</td><td align="left" valign="top">General ICU</td></tr><tr><td align="left" valign="top">D</td><td align="left" valign="top">Female</td><td align="char" char="." valign="top">43</td><td align="char" char="." 
valign="top">25</td><td align="left" valign="top">Neurology</td></tr><tr><td align="left" valign="top">E</td><td align="left" valign="top">Female</td><td align="char" char="." valign="top">36</td><td align="char" char="." valign="top">15</td><td align="left" valign="top">ICU</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>ICU: intensive care unit.</p></fn></table-wrap-foot></table-wrap><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Pressure injury staging results of the 5 wound therapists.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom" rowspan="2">Order</td><td align="left" valign="bottom" colspan="5">Staging results of the 5 wound therapists</td></tr><tr><td align="left" valign="bottom">A</td><td align="left" valign="bottom">B</td><td align="left" valign="bottom">C</td><td align="left" valign="bottom">D</td><td align="left" valign="bottom">E</td></tr></thead><tbody><tr><td align="left" valign="top">1</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage IV</td></tr><tr><td align="left" valign="top">2</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">Stage I</td></tr><tr><td align="left" valign="top">3</td><td align="left" valign="top">Stage II</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage II</td><td align="left" valign="top">Stage II</td><td align="left" valign="top">Stage II</td></tr><tr><td align="left" valign="top">4</td><td align="left" valign="top">Unstageable</td><td align="left" valign="top">Unstageable</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Unstageable</td><td 
align="left" valign="top">Unstageable</td></tr><tr><td align="left" valign="top">5</td><td align="left" valign="top">Unstageable</td><td align="left" valign="top">Unstageable</td><td align="left" valign="top">Unstageable</td><td align="left" valign="top">Unstageable</td><td align="left" valign="top">Unstageable</td></tr><tr><td align="left" valign="top">6</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td></tr><tr><td align="left" valign="top">7</td><td align="left" valign="top">SDTI<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup></td><td align="left" valign="top">SDTI</td><td align="left" valign="top">SDTI</td><td align="left" valign="top">SDTI</td><td align="left" valign="top">SDTI</td></tr><tr><td align="left" valign="top">8</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">Unstageable</td><td align="left" valign="top">Stage I</td></tr><tr><td align="left" valign="top">9</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td></tr><tr><td align="left" valign="top">10</td><td align="left" valign="top">Unstageable</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td></tr><tr><td align="left" valign="top">11</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage III</td></tr><tr><td align="left" valign="top">12</td><td align="left" 
valign="top">Stage III</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage III</td></tr><tr><td align="left" valign="top">13</td><td align="left" valign="top">Stage II</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage II</td><td align="left" valign="top">Stage II</td><td align="left" valign="top">Stage II</td></tr><tr><td align="left" valign="top">14</td><td align="left" valign="top">SDTI</td><td align="left" valign="top">SDTI</td><td align="left" valign="top">SDTI</td><td align="left" valign="top">SDTI</td><td align="left" valign="top">SDTI</td></tr><tr><td align="left" valign="top">15</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td></tr><tr><td align="left" valign="top">16</td><td align="left" valign="top">SDTI</td><td align="left" valign="top">SDTI</td><td align="left" valign="top">SDTI</td><td align="left" valign="top">SDTI</td><td align="left" valign="top">SDTI</td></tr><tr><td align="left" valign="top">17</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">Stage I</td></tr><tr><td align="left" valign="top">18</td><td align="left" valign="top">Stage II</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage II</td><td align="left" valign="top">Stage II</td><td align="left" valign="top">Stage II</td></tr><tr><td align="left" valign="top">19</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage 
III</td></tr><tr><td align="left" valign="top">20</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">SDTI</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td></tr><tr><td align="left" valign="top">21</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">Stage I</td></tr><tr><td align="left" valign="top">22</td><td align="left" valign="top">Unstageable</td><td align="left" valign="top">Unstageable</td><td align="left" valign="top">Unstageable</td><td align="left" valign="top">Unstageable</td><td align="left" valign="top">Unstageable</td></tr><tr><td align="left" valign="top">23</td><td align="left" valign="top">SDTI</td><td align="left" valign="top">SDTI</td><td align="left" valign="top">SDTI</td><td align="left" valign="top">SDTI</td><td align="left" valign="top">SDTI</td></tr><tr><td align="left" valign="top">24</td><td align="left" valign="top">Stage II</td><td align="left" valign="top">Stage II</td><td align="left" valign="top">Stage II</td><td align="left" valign="top">Stage II</td><td align="left" valign="top">Stage II</td></tr><tr><td align="left" valign="top">25</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">Stage I</td></tr><tr><td align="left" valign="top">26</td><td align="left" valign="top">Stage II</td><td align="left" valign="top">Stage II</td><td align="left" valign="top">Stage II</td><td align="left" valign="top">Stage II</td><td align="left" valign="top">Stage II</td></tr><tr><td align="left" valign="top">27</td><td align="left" valign="top">Unstageable</td><td align="left" valign="top">Unstageable</td><td align="left" 
valign="top">Unstageable</td><td align="left" valign="top">Unstageable</td><td align="left" valign="top">Unstageable</td></tr><tr><td align="left" valign="top">28</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td><td align="left" valign="top">Stage IV</td></tr><tr><td align="left" valign="top">29</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage III</td><td align="left" valign="top">Stage III</td></tr><tr><td align="left" valign="top">30</td><td align="left" valign="top">SDTI</td><td align="left" valign="top">SDTI</td><td align="left" valign="top">SDTI</td><td align="left" valign="top">Stage I</td><td align="left" valign="top">SDTI</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>SDTI: suspected deep tissue injury.</p></fn></table-wrap-foot></table-wrap><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Coefficient of internal consistency among the 5 wound therapists. 
The sample data contain 30 valid participants and 5 raters.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom"/><td align="left" valign="bottom"/><td align="left" valign="bottom" colspan="3">Asymptotic</td><td align="left" valign="bottom"/></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Kappa</td><td align="left" valign="top">SE</td><td align="left" valign="top"><italic>Z</italic></td><td align="left" valign="top"><italic>P</italic> value</td><td align="left" valign="top">Asymptotic 95% CI</td></tr></thead><tbody><tr><td align="left" valign="top">Overall</td><td align="left" valign="top">0.856</td><td align="left" valign="top">0.026</td><td align="left" valign="top">32.948</td><td align="left" valign="top">&#x003C;.001</td><td align="left" valign="top">0.805-0.906</td></tr></tbody></table></table-wrap></sec><sec id="s2-3"><title>The PI Staging Assessment Process and Shooting Process</title><p>Before taking the shots, the researcher and the 2 wound therapists communicated with the patients and their family members to obtain consent. After that, the 2 therapists simultaneously evaluated the staging of the pressure ulcers via the <italic>Prevention and Treatment of Pressure Ulcers/Injuries: Clinical Practice Guideline, International Guideline 2019</italic>. If there were any objections to the results, we informed the third wound therapist on duty, and they would negotiate the staging.</p><p>The following types of shooting equipment were used: (1) Fuji XT-4 was used for shooting, with 21.6 million effective pixels, and Fuji XF 60 mm F2.4 R Macro and (2) a gray card. 
The specifications of the shooting mode are as follows: (1) antishake plus automatic mode; (2) parameter setting: shutter time: 1/60 s; (3) sensitivity: 3600-6000; (4) aperture: F6-8; (5) focal length: 60 mm; and (6) white balance: automatic white balance.</p><p>When photographs were taken, the camera was positioned parallel to the wound, with a focus on both the wound itself and the surrounding skin (<xref ref-type="fig" rid="figure1">Figure 1</xref>). Additionally, a gray card was used to minimize any interference caused by natural light. Following each shot, the images were carefully examined. If any image was deemed unclear, the researcher retook the image. All of the original images were saved in JPG format and given a specific name consisting of a stage followed by a corresponding number. Finally, the images were transferred onto a computer for further analysis and storage.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Shooting process.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e62774_fig01.png"/></fig></sec><sec id="s2-4"><title>Image Augmentation</title><p>Horizontal flipping, vertical flipping, and random clipping were used for data augmentation to expand the training dataset and improve the generalization ability of the models (<xref ref-type="fig" rid="figure2">Figure 2</xref>) [<xref ref-type="bibr" rid="ref15">15</xref>]. Taking random cropping as an example, the size of the original image was reset to 512&#x00D7;512&#x00D7;3, and after random cropping, the size was 256&#x00D7;256&#x00D7;3. 
The size of the augmented image was fixed again to 224&#x00D7;224&#x00D7;3 before being input into the networks so that the network model could recognize them.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Image augmentation: (A) original image, (B) horizontal flip, (C) vertical flip, and (D) random clipping.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e62774_fig02.png"/></fig></sec><sec id="s2-5"><title>Image Normalization</title><p>The PI images are RGB color patterns (<xref ref-type="fig" rid="figure3">Figure 3</xref>), and the pixel values are in the range [0, 255] [<xref ref-type="bibr" rid="ref16">16</xref>]. To reduce the adverse effects caused by singular sample data and speed up model training, we limit the pixel values to [&#x2212;1, 1] in this study via 2 image normalization calculation formulas, where <inline-formula><mml:math id="ieqn1"><mml:mover accent="false"><mml:mrow><mml:mi mathvariant="normal">x</mml:mi></mml:mrow><mml:mo>&#x00AF;</mml:mo></mml:mover></mml:math></inline-formula> is the mean of the pixel values, <italic>N</italic> represents the number of pixels, and <italic>x</italic> represents the pixel value of each pixel [<xref ref-type="bibr" rid="ref17">17</xref>]. 
The calculation formula is as follows:</p><p>(1)<inline-formula><mml:math id="ieqn2"><mml:mrow><mml:mi>s</mml:mi><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:mi>N</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:mfrac><mml:mstyle displaystyle="true"><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>N</mml:mi></mml:munderover><mml:mrow><mml:msup><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mover accent="true"><mml:mi>x</mml:mi><mml:mo stretchy="true">&#x00AF;</mml:mo></mml:mover></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac></mml:mrow></mml:msup></mml:mrow></mml:math></inline-formula></p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Image normalization: (A) original image and (B) normalized image.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e62774_fig03.png"/></fig><p>The Adam optimizer was used, and the loss function was the cross-entropy loss function (cross-entropy loss function), which is commonly used in multiple classification tasks. The data batch (batch size) was set to 8, and the initial learning rate was set to 0.0001. Every network was trained for 30 epochs.</p><p>This study was based on the 64-bit Windows 11 computer system and the Ubuntu 16.04 operating system for image training in the PyTorch 2.0.1 framework using the Python 3.9.19 language with CUDA 11.8 and NVIDIA GeForce RTX 4060 Laptop (8G).</p><p>The process of model training was divided into training, validation, and testing. 
The process is shown in <xref ref-type="fig" rid="figure4">Figure 4</xref>. First, the PI image was cut into the main wound area, and the cut images were resized to a uniform size of 224&#x00D7;224 pixels. In the training set, the CNN training model is input; the performance of the model on the validation set is evaluated; and the state and convergence of the model are tested to adjust the hyperparameters. In the test set, the model outputs corresponding prediction results through a series of convolutions, nonlinear activations, pooling, etc, to evaluate the generalization ability of the model.</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>The network workflow diagrams. SDTI: suspected deep tissue injury.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e62774_fig04.png"/></fig><p>In our study, we used AlexNet, VGGNet 16, ResNet 18, and DenseNet121 to train the images. AlexNet, a pioneering CNN in image classification, consists of 8 layers: 5 convolutional layers with varying filter numbers and 3 fully connected layers. It employs ReLU activations, max pooling, and dropout for regularization. The introduction of ReLU and dropout layers in AlexNet reduced training times and prevented overfitting, whereas its deep architecture allowed for the learning of complex features, enhancing classification accuracy [<xref ref-type="bibr" rid="ref18">18</xref>].</p><p>VGGNet 16 is a 16-layer deep network consisting of 13 convolutional layers and 3 fully connected layers, all of which use 3&#x00D7;3 filters. It features a consistent architecture with convolutional layers followed by max pooling layers, which simplifies optimization. 
This network is effective for capturing grid-like patterns in images and allows for the learning of more abstract and complex features due to its depth [<xref ref-type="bibr" rid="ref19">19</xref>].</p><p>ResNet 18 is a shallow network with 18 layers organized in a residual learning framework. It consists of 4 blocks of residual units, each with convolutional layers, batch normalization, ReLU activation, and max pooling layers. The key innovation is the introduction of residual connections, which allow the network to learn residual mappings, making it easier to train deeper networks by effectively flowing gradients through the network [<xref ref-type="bibr" rid="ref20">20</xref>].</p><p>DenseNet121 connects each layer to every other layer in a feedforward manner, using features from all preceding layers as inputs and its own feature map for all subsequent layers. This dense connectivity reduces the number of parameters, making DenseNet121 more parameter-efficient than other CNNs of similar depth. It helps mitigate the vanishing gradient problem and enhances feature propagation, leading to improved performance and efficiency [<xref ref-type="bibr" rid="ref21">21</xref>].</p><p>In summary, the 4 CNN models were selected for their historical performance in image classification tasks, the uniqueness of their architectural design, and their significant contributions to the field of DL (<xref ref-type="fig" rid="figure4">Figure 4</xref>).</p></sec><sec id="s2-6"><title>Evaluation of Performance</title><p>We evaluated the performance from a single image result. The diagnostic performance was measured by accuracy (ACC), precision (Pre), recall (Rec), and the <italic>F</italic><sub>1</sub>-score. 
To calculate the above metrics, we defined an abnormal result as positive and a normal result as negative.</p><p>(1) <inline-formula><mml:math id="ieqn3"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">ACC</mml:mtext><mml:mo mathvariant="italic">=</mml:mo><mml:mfrac><mml:mrow><mml:mtext mathvariant="italic">TP</mml:mtext><mml:mo mathvariant="italic">+</mml:mo><mml:mtext mathvariant="italic">TN</mml:mtext></mml:mrow><mml:mrow><mml:mtext mathvariant="italic">TP</mml:mtext><mml:mo mathvariant="italic">+</mml:mo><mml:mtext mathvariant="italic">FP</mml:mtext><mml:mo mathvariant="italic">+</mml:mo><mml:mtext mathvariant="italic">FN</mml:mtext><mml:mo mathvariant="italic">+</mml:mo><mml:mtext mathvariant="italic">TN</mml:mtext></mml:mrow></mml:mfrac></mml:mrow></mml:mrow></mml:mstyle></mml:math></inline-formula></p><p>(2) <inline-formula><mml:math id="ieqn4"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">Pre</mml:mtext><mml:mo mathvariant="italic">=</mml:mo><mml:mfrac><mml:mtext mathvariant="italic">TP</mml:mtext><mml:mrow><mml:mtext mathvariant="italic">TP</mml:mtext><mml:mo mathvariant="italic">+</mml:mo><mml:mtext mathvariant="italic">FP</mml:mtext></mml:mrow></mml:mfrac></mml:mrow></mml:mrow></mml:mstyle></mml:math></inline-formula></p><p>(3) <inline-formula><mml:math id="ieqn5"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">Rec</mml:mtext><mml:mo mathvariant="italic">=</mml:mo><mml:mfrac><mml:mtext mathvariant="italic">TP</mml:mtext><mml:mrow><mml:mtext mathvariant="italic">TP</mml:mtext><mml:mo mathvariant="italic">+</mml:mo><mml:mtext mathvariant="italic">FN</mml:mtext></mml:mrow></mml:mfrac></mml:mrow></mml:mrow></mml:mstyle></mml:math></inline-formula></p><p>(4) <inline-formula><mml:math id="ieqn6"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mrow><mml:mi 
mathvariant="italic">F</mml:mi><mml:mn mathvariant="italic">1</mml:mn><mml:mtext mathvariant="italic">-score</mml:mtext><mml:mo mathvariant="italic">=</mml:mo><mml:mn mathvariant="italic">2</mml:mn><mml:mo>&#x22C5;</mml:mo><mml:mfrac><mml:mrow><mml:mtext mathvariant="italic">Precision</mml:mtext><mml:mo>&#x22C5;</mml:mo><mml:mtext mathvariant="italic">Recall</mml:mtext></mml:mrow><mml:mrow><mml:mtext mathvariant="italic">Precision</mml:mtext><mml:mo mathvariant="italic">+</mml:mo><mml:mtext mathvariant="italic">Recall</mml:mtext></mml:mrow></mml:mfrac></mml:mrow></mml:mrow></mml:mstyle></mml:math></inline-formula></p></sec><sec id="s2-7"><title>Ethical Considerations</title><p>This study was approved by the Biomedical Ethics Committee of the West China Hospital of Sichuan University (#1053). In this study, the informed consent process was strictly carried out. Participation in the study was completely voluntary, and patients could refuse to participate or withdraw at any time during any phase of the study without discrimination or retaliation and without affecting their medical treatment and benefits. If participants decided to withdraw from the study, they could contact us. Patient privacy was strictly protected, and all data obtained were only used for this study. Patients did not have to pay any fees to participate in the study.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><p>We collected 853 raw PI images in this study. We set the images into a training set, a validation set, and a test set at a ratio of 8:1:1. After augmentation, 7677 images were used. A confusion matrix is a numerical table used to display the performance results of a classification model on test data with known target labels. It serves as a visual representation of how the model makes predictions on the test dataset. 
On the basis of the results obtained from each network validation, we plotted the normalized confusion matrix, with the true labels on the vertical axis and the predicted labels on the horizontal axis (<xref ref-type="fig" rid="figure5">Figures 5</xref><xref ref-type="fig" rid="figure6"/><xref ref-type="fig" rid="figure7"/>-<xref ref-type="fig" rid="figure8">8</xref>). <xref ref-type="table" rid="table4">Table 4</xref> provides a summary of the accuracy, precision, recall, and <italic>F</italic><sub>1</sub>-scores for the 4 CNNs.</p><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>Performance of the CNNs in the identification and classification of PI images. Confusion matrix showing the accuracy and precision of 87.74% and 97.48%, respectively. CNN: convolutional neural network; PI: pressure injury; SDTI: suspected deep tissue injury.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e62774_fig05.png"/></fig><fig position="float" id="figure6"><label>Figure 6.</label><caption><p>Performance of the CNNs in the identification and classification of PI images. Confusion matrix showing the accuracy and precision of 82.42% and 92.4%, respectively. CNN: convolutional neural network; PI: pressure injury; SDTI: suspected deep tissue injury.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e62774_fig06.png"/></fig><fig position="float" id="figure7"><label>Figure 7.</label><caption><p>Performance of the CNNs in the identification and classification of PI images. Confusion matrix showing the accuracy and precision of 92.42% and 98.43%, respectively. 
CNN: convolutional neural network; PI: pressure injury; SDTI: suspected deep tissue injury.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e62774_fig07.png"/></fig><fig position="float" id="figure8"><label>Figure 8.</label><caption><p>Performance of the CNNs in the identification and classification of PI images. Confusion matrix showing the accuracy and precision of 93.71% and 98.72%, respectively. CNN: convolutional neural network; PI: pressure injury; SDTI: suspected deep tissue injury.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e62774_fig08.png"/></fig><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>The model&#x2019;s overall classification performance.</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom" colspan="2">Models and PI<sup><xref ref-type="table-fn" rid="table4fn1">a</xref></sup> stages</td><td align="left" valign="bottom">Accuracy (%)</td><td align="left" valign="bottom">Precision (%)</td><td align="left" valign="bottom">Recall (%)</td><td align="left" valign="bottom"><italic>F</italic><sub>1</sub>-score (%)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="6">AlexNet</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Stage I</td><td align="left" valign="top">92.76</td><td align="left" valign="top">98.25</td><td align="left" valign="top">91.43</td><td align="left" valign="top">94.71</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Stage II</td><td align="left" valign="top">86.36</td><td align="left" valign="top">97.60</td><td align="left" valign="top">92.76</td><td align="left" valign="top">95.12</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Stage III</td><td align="left" valign="top">73.91</td><td align="left" valign="top">95.53</td><td
align="left" valign="top">86.36</td><td align="left" valign="top">90.71</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Stage IV</td><td align="left" valign="top">89.89</td><td align="left" valign="top">99.00</td><td align="left" valign="top">73.91</td><td align="left" valign="top">84.63</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Unstageable</td><td align="left" valign="top">79.38</td><td align="left" valign="top">98.29</td><td align="left" valign="top">89.89</td><td align="left" valign="top">93.90</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">SDTI<sup><xref ref-type="table-fn" rid="table4fn2">b</xref></sup></td><td align="left" valign="top">91.43</td><td align="left" valign="top">96.24</td><td align="left" valign="top">79.38</td><td align="left" valign="top">87.00</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Overall</td><td align="left" valign="top">87.74</td><td align="left" valign="top">97.48</td><td align="left" valign="top">85.62</td><td align="left" valign="top">94.71</td></tr><tr><td align="left" valign="top" colspan="6">VGGNet 16</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Stage I</td><td align="left" valign="top">96.19</td><td align="left" valign="top">99.19</td><td align="left" valign="top">96.19</td><td align="left" valign="top">97.66</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Stage II</td><td align="left" valign="top">80.26</td><td align="left" valign="top">93.78</td><td align="left" valign="top">93.78</td><td align="left" valign="top">86.49</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Stage III</td><td align="left" valign="top">82.47</td><td align="left" valign="top">94.36</td><td align="left" valign="top">94.36</td><td align="left" valign="top">88.01</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Stage IV</td><td 
align="left" valign="top">76.40</td><td align="left" valign="top">98.50</td><td align="left" valign="top">98.50</td><td align="left" valign="top">75.24</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Unstageable</td><td align="left" valign="top">81.44</td><td align="left" valign="top">96.17</td><td align="left" valign="top">96.17</td><td align="left" valign="top">85.15</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">SDTI</td><td align="left" valign="top">96.19</td><td align="left" valign="top">96.38</td><td align="left" valign="top">96.38</td><td align="left" valign="top">88.28</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Overall</td><td align="left" valign="top">82.42</td><td align="left" valign="top">96.40</td><td align="left" valign="top">95.90</td><td align="left" valign="top">97.66</td></tr><tr><td align="left" valign="top" colspan="6">ResNet 18</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Stage I</td><td align="left" valign="top">94.08</td><td align="left" valign="top">99.41</td><td align="left" valign="top">97.14</td><td align="left" valign="top">98.26</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Stage II</td><td align="left" valign="top">90.91</td><td align="left" valign="top">98.06</td><td align="left" valign="top">94.08</td><td align="left" valign="top">96.02</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Stage III</td><td align="left" valign="top">86.96</td><td align="left" valign="top">97.03</td><td align="left" valign="top">90.91</td><td align="left" valign="top">93.84</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Stage IV</td><td align="left" valign="top">92.13</td><td align="left" valign="top">99.50</td><td align="left" valign="top">86.96</td><td align="left" valign="top">92.80</td></tr><tr><td align="left" valign="top"/><td align="left" 
valign="top">Unstageable</td><td align="left" valign="top">88.66</td><td align="left" valign="top">98.69</td><td align="left" valign="top">92.13</td><td align="left" valign="top">95.29</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">SDTI</td><td align="left" valign="top">97.14</td><td align="left" valign="top">97.91</td><td align="left" valign="top">88.66</td><td align="left" valign="top">93.95</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Overall</td><td align="left" valign="top">92.42</td><td align="left" valign="top">98.43</td><td align="left" valign="top">91.64</td><td align="left" valign="top">98.26</td></tr><tr><td align="left" valign="top" colspan="6">DenseNet121</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Stage I</td><td align="left" valign="top">93.42</td><td align="left" valign="top">99.03</td><td align="left" valign="top">95.24</td><td align="left" valign="top">97.09</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Stage II</td><td align="left" valign="top">96.10</td><td align="left" valign="top">97.88</td><td align="left" valign="top">93.42</td><td align="left" valign="top">95.59</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Stage III</td><td align="left" valign="top">78.26</td><td align="left" valign="top">98.70</td><td align="left" valign="top">96.10</td><td align="left" valign="top">97.38</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Stage IV</td><td align="left" valign="top">94.38</td><td align="left" valign="top">99.17</td><td align="left" valign="top">78.26</td><td align="left" valign="top">87.48</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Unstageable</td><td align="left" valign="top">91.75</td><td align="left" valign="top">99.06</td><td align="left" valign="top">94.38</td><td align="left" valign="top">96.66</td></tr><tr><td align="left" valign="top"/><td 
align="left" valign="top">SDTI</td><td align="left" valign="top">95.24</td><td align="left" valign="top">98.47</td><td align="left" valign="top">91.75</td><td align="left" valign="top">94.99</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Overall</td><td align="left" valign="top">93.71</td><td align="left" valign="top">98.72</td><td align="left" valign="top">91.53</td><td align="left" valign="top">97.09</td></tr></tbody></table><table-wrap-foot><fn id="table4fn1"><p><sup>a</sup>PI: pressure injury.</p></fn><fn id="table4fn2"><p><sup>b</sup>SDTI: suspected deep tissue injury.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>Among all the CNN models, DenseNet121 demonstrated the highest overall accuracy of 93.71%. The classification performances of AlexNet, VGGNet16, and ResNet18 exhibited overall accuracies of 87.74%, 82.42%, and 92.42%, respectively.</p><p>PIs are a global health issue, and effective treatment requires early and accurate classification and prevention. The staging of the PI is usually a subjective evaluation by medical professionals via standard systems, but it can differ due to variations in staff experience, training, and wound characteristics [<xref ref-type="bibr" rid="ref22">22</xref>].</p><p>CNNs are feedforward neural networks with convolutional computations and deep structures. They capture local image features regardless of position through their core convolutional operation. This allows CNN-based models to potentially reduce assessment disparities among medical personnel. 
This approach promises precise classification of PIs, ensuring consistent patient care and valuable advice post discharge, which improves quality of life and health care resource efficiency [<xref ref-type="bibr" rid="ref23">23</xref>].</p><p>However, many factors need to be considered when a reliable staging model is constructed (eg, the quality and quantity of images matter for model training). Several previous studies were based on retrospectively collected images in which other types of wounds were included, and the shooting equipment may not have been updated with lower image pixels, which affects the reliability of supervised learning in DL [<xref ref-type="bibr" rid="ref24">24</xref>-<xref ref-type="bibr" rid="ref26">26</xref>]. Some researchers have attempted to classify PIs via neural network models on the basis of the clinical staging system; however, most of these studies have relied on public datasets containing a limited number of images per grade, typically only a few dozen [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref26">26</xref>]. It is recommended that more than 150 training images should be used per grade to achieve reasonable classification accuracy when resources are limited [<xref ref-type="bibr" rid="ref27">27</xref>].</p><p>Therefore, it is necessary to construct a large dataset of high quality with reliable labeling of each grade. In contrast, in this study, PI images were collected prospectively by professional cameras, and a gray card and executable shooting standard were developed to ensure that the quality of each image was clear and that the wound characteristics could be clearly displayed. Moreover, the staging results of each image were assessed by 2 wound therapists simultaneously. The wound therapists included were all qualified and had more than 15 years of working experience, so their assessment results were convincing, which contributed to the good comparability of the images and the reliable sample set. 
Additionally, we used a professional digital camera and a gray card, which were rarely used in previous studies, to ensure the quality of the images, particularly in accurately representing the original color of PIs [<xref ref-type="bibr" rid="ref28">28</xref>].</p><p>Compared with other studies, this study still has advantages in terms of classification accuracy. Ay et al [<xref ref-type="bibr" rid="ref24">24</xref>] used the European Pressure Ulcer Advisory Panel staging system and included PIs from stage 1 to stage 4. They trained 1091 images from a public dataset of PIs called the PIID (Public Injury Images Dataset) and 15 images from Google via DenseNet121, InceptionV3, MobileNetV2, ResNet152, ResNet50, and VGG16. The results indicated that the average accuracy of the 6 algorithms for each stage during pretraining ranged from 54.84% to 77.42% [<xref ref-type="bibr" rid="ref24">24</xref>]. Kim et al [<xref ref-type="bibr" rid="ref29">29</xref>] set SE-ResNext101 to train 2614 images from 493 participants. The accuracy of the model was 0.793 over the internal testing set and 0.717 over the external testing set [<xref ref-type="bibr" rid="ref29">29</xref>]. In our study, DenseNet121 exhibited the highest accuracy (93.71%), precision (98.72%), recall (91.53%), and <italic>F</italic><sub>1</sub>-score (97.09%), possibly because of the following reasons. (1) DenseNet121 improves the backpropagation of gradients due to the dense connection mode, making the network easier to train. Moreover, it can reduce the gradient disappearance problem caused by the transmission of input information and gradient information between many layers. (2) The number of parameters is reduced. (3) Low-dimensional features are preserved. In a standard convolutional network, the final output is only used to extract the highest-level features [<xref ref-type="bibr" rid="ref30">30</xref>]. It is also important to consider the algorithm and image quality. 
In our study, 5 wound therapists, all of whom were qualified and had been engaged in wound management for at least 15 years, were recruited for grading. Therefore, the results of image staging were relatively accurate, and the classification of each label learned by CNNs was also convincing. Overall, DenseNet121 has become a popular choice in the field of computer vision because of its effective training of deep networks, high performance, generalizability, and insights into the learning process. These advantages make it a powerful tool for image recognition and classification tasks. However, DenseNet121 processes a large number of layers, is relatively time-consuming, and consumes considerable computing power. DenseNet121 is more suitable for our present small sample. In regard to large-scale datasets, from the perspective of computing power and time factors, ResNet 18 or a higher level, such as ResNet 34 or ResNet 50, may be a better choice [<xref ref-type="bibr" rid="ref31">31</xref>].</p><p>To improve the model&#x2019;s adaptability to skin color variations, future research should increase the sample size and expand collection areas. Additionally, implementing the model in clinical settings requires effective communication due to the perception of opacity in DL methods. Accountability is crucial in the medical field, as errors can have legal implications.</p></sec><sec id="s4-2"><title>Limitations</title><p>The images obtained from data segmentation in this study contain some normal skin tissue. The next step is to make the wound more prominent or to make a judgment after segmenting the image.</p><p>Clinical wound management is influenced by blood and fluid seepage. 
Future studies should integrate DL with 3D imaging, thermal imaging, and fluid seepage assessment for a more comprehensive wound assessment.</p><p>The study also faced challenges due to limited prospective image data and reliance on a single device, potentially leading to biases and affecting model accuracy. This study did not compare the model&#x2019;s performance with that of wound therapists or nurses in discriminating pressure ulcers; instead, it focused on internal validation and algorithm comparison.</p></sec><sec id="s4-3"><title>Conclusions</title><p>Staging models that use CNNs as their foundation exhibit robust classification capabilities. However, further research is needed to validate the reliability of these observed results. As such, we intend to gather an extensive array of additional imagery and undertake a comparative analysis between the staging outcomes generated by the model and those achieved by frontline clinical nurses. This comparative assessment will allow us to identify any potential discrepancies and disparities between the two, thereby affording us valuable insights for refining the model&#x2019;s performance and suggesting effective strategies for enhancing the skills and capabilities of nurses.</p></sec></sec></body><back><ack><p>The authors would like to thank the 5 wound therapists for their assistance in assessing staging and Duoxiang Zhao from the Pittsburgh Institute, Sichuan University who finished part of the image processing. This study is funded by the 1&#x00B7;3&#x00B7;5 projects for artificial intelligence (ZYAI24029) at West China Hospital, Sichuan University.</p></ack><notes><sec><title>Data Availability</title><p>The raw data supporting the conclusions of this paper will be made available by the authors without undue reservation.</p></sec></notes><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term
id="abb1">CNN</term><def><p>convolutional neural network</p></def></def-item><def-item><term id="abb2">DL</term><def><p>deep learning</p></def></def-item><def-item><term id="abb3">NPIAP</term><def><p>National Pressure Injury Advisory Panel</p></def></def-item><def-item><term id="abb4">PI</term><def><p>pressure injury</p></def></def-item><def-item><term id="abb5">PIID</term><def><p>Public Injury Images Dataset</p></def></def-item><def-item><term id="abb6">SDTI</term><def><p>suspected deep tissue injury</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Edsberg</surname><given-names>LE</given-names> </name><name name-style="western"><surname>Black</surname><given-names>JM</given-names> </name><name name-style="western"><surname>Goldberg</surname><given-names>M</given-names> </name><name name-style="western"><surname>McNichol</surname><given-names>L</given-names> </name><name name-style="western"><surname>Moore</surname><given-names>L</given-names> </name><name name-style="western"><surname>Sieggreen</surname><given-names>M</given-names> </name></person-group><article-title>Revised national pressure ulcer advisory panel pressure injury staging system: revised pressure injury staging system</article-title><source>J Wound Ostomy Continence Nurs</source><year>2016</year><volume>43</volume><issue>6</issue><fpage>585</fpage><lpage>597</lpage><pub-id pub-id-type="doi">10.1097/WON.0000000000000281</pub-id><pub-id pub-id-type="medline">27749790</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Lin</surname><given-names>F</given-names> </name><name 
name-style="western"><surname>Thalib</surname><given-names>L</given-names> </name><name name-style="western"><surname>Chaboyer</surname><given-names>W</given-names> </name></person-group><article-title>Global prevalence and incidence of pressure injuries in hospitalised adult patients: a systematic review and meta-analysis</article-title><source>Int J Nurs Stud</source><year>2020</year><month>05</month><volume>105</volume><fpage>103546</fpage><pub-id pub-id-type="doi">10.1016/j.ijnurstu.2020.103546</pub-id><pub-id pub-id-type="medline">32113142</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bennett</surname><given-names>G</given-names> </name><name name-style="western"><surname>Dealey</surname><given-names>C</given-names> </name><name name-style="western"><surname>Posnett</surname><given-names>J</given-names> </name></person-group><article-title>The cost of pressure ulcers in the UK</article-title><source>Age Ageing</source><year>2004</year><month>05</month><volume>33</volume><issue>3</issue><fpage>230</fpage><lpage>235</lpage><pub-id pub-id-type="doi">10.1093/ageing/afh086</pub-id><pub-id pub-id-type="medline">15082426</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="book"><source>Prevention and Treatment of Pressure Ulcers/Injuries: Clinical Practice Guideline</source><year>2019</year><edition>3</edition><publisher-name>European Pressure Ulcer Advisory Panel, National Pressure Injury Advisory Panel, and Pan Pacific Pressure Injury Alliance</publisher-name><pub-id pub-id-type="other">978-0-6480097-8-8</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tschannen</surname><given-names>D</given-names> </name><name 
name-style="western"><surname>Mckay</surname><given-names>M</given-names> </name><name name-style="western"><surname>Steven</surname><given-names>M</given-names> </name></person-group><article-title>Improving pressure ulcer staging accuracy through a nursing student experiential intervention</article-title><source>J Nurs Educ</source><year>2016</year><month>05</month><day>1</day><volume>55</volume><issue>5</issue><fpage>266</fpage><lpage>270</lpage><pub-id pub-id-type="doi">10.3928/01484834-20160414-05</pub-id><pub-id pub-id-type="medline">27115453</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Young</surname><given-names>DL</given-names> </name><name name-style="western"><surname>Shen</surname><given-names>JJ</given-names> </name><name name-style="western"><surname>Estocado</surname><given-names>N</given-names> </name><name name-style="western"><surname>Landers</surname><given-names>MR</given-names> </name></person-group><article-title>Financial impact of improved pressure ulcer staging in the acute hospital with use of a new tool, the NE1 Wound Assessment Tool</article-title><source>Adv Skin Wound Care</source><year>2012</year><month>04</month><volume>25</volume><issue>4</issue><fpage>158</fpage><lpage>166</lpage><pub-id pub-id-type="doi">10.1097/01.ASW.0000413597.20438.d2</pub-id><pub-id pub-id-type="medline">22441048</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zahia</surname><given-names>S</given-names> </name><name name-style="western"><surname>Garcia Zapirain</surname><given-names>MB</given-names> </name><name name-style="western"><surname>Sevillano</surname><given-names>X</given-names> </name><name name-style="western"><surname>Gonz&#x00E1;lez</surname><given-names>A</given-names> </name><name 
name-style="western"><surname>Kim</surname><given-names>PJ</given-names> </name><name name-style="western"><surname>Elmaghraby</surname><given-names>A</given-names> </name></person-group><article-title>Pressure injury image analysis with machine learning techniques: a systematic review on previous and possible future methods</article-title><source>Artif Intell Med</source><year>2020</year><month>01</month><volume>102</volume><fpage>101742</fpage><pub-id pub-id-type="doi">10.1016/j.artmed.2019.101742</pub-id><pub-id pub-id-type="medline">31980110</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Currie</surname><given-names>G</given-names> </name><name name-style="western"><surname>Hawk</surname><given-names>KE</given-names> </name><name name-style="western"><surname>Rohren</surname><given-names>E</given-names> </name><name name-style="western"><surname>Vial</surname><given-names>A</given-names> </name><name name-style="western"><surname>Klein</surname><given-names>R</given-names> </name></person-group><article-title>Machine learning and deep learning in medical imaging: intelligent imaging</article-title><source>J Med Imaging Radiat Sci</source><year>2019</year><month>12</month><volume>50</volume><issue>4</issue><fpage>477</fpage><lpage>487</lpage><pub-id pub-id-type="doi">10.1016/j.jmir.2019.09.005</pub-id><pub-id pub-id-type="medline">31601480</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Esteva</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kuprel</surname><given-names>B</given-names> </name><name name-style="western"><surname>Novoa</surname><given-names>RA</given-names> </name><etal/></person-group><article-title>Dermatologist-level classification of skin cancer with deep neural 
networks</article-title><source>Nature</source><year>2017</year><month>02</month><day>2</day><volume>542</volume><issue>7639</issue><fpage>115</fpage><lpage>118</lpage><pub-id pub-id-type="doi">10.1038/nature21056</pub-id><pub-id pub-id-type="medline">28117445</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>JH</given-names> </name><name name-style="western"><surname>Shin</surname><given-names>HW</given-names> </name><name name-style="western"><surname>Ha</surname><given-names>C</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>SY</given-names> </name><name name-style="western"><surname>Go</surname><given-names>T</given-names> </name></person-group><article-title>Diagnosis of pressure ulcer stage using on-device AI</article-title><source>Appl Sci (Basel)</source><year>2024</year><volume>14</volume><issue>16</issue><fpage>7124</fpage><pub-id pub-id-type="doi">10.3390/app14167124</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Swerdlow</surname><given-names>M</given-names> </name><name name-style="western"><surname>Guler</surname><given-names>O</given-names> </name><name name-style="western"><surname>Yaakov</surname><given-names>R</given-names> </name><name name-style="western"><surname>Armstrong</surname><given-names>DG</given-names> </name></person-group><article-title>Simultaneous segmentation and classification of pressure injury image data using Mask-R-CNN</article-title><source>Comput Math Methods Med</source><year>2023</year><volume>2023</volume><fpage>3858997</fpage><pub-id pub-id-type="doi">10.1155/2023/3858997</pub-id><pub-id 
pub-id-type="medline">36778787</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Seo</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Eom</surname><given-names>IH</given-names> </name><etal/></person-group><article-title>Visual classification of pressure injury stages for nurses: a deep learning model applying modern convolutional neural networks</article-title><source>J Adv Nurs</source><year>2023</year><month>08</month><volume>79</volume><issue>8</issue><fpage>3047</fpage><lpage>3056</lpage><pub-id pub-id-type="doi">10.1111/jan.15584</pub-id><pub-id pub-id-type="medline">36752192</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="web"><article-title>National pressure injury advisory panel</article-title><access-date>2025-03-11</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://npiap.com/">https://npiap.com/</ext-link></comment></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fleiss</surname><given-names>JL</given-names> </name></person-group><article-title>Measuring nominal scale agreement among many raters</article-title><source>Psychol Bull</source><year>1971</year><volume>76</volume><issue>5</issue><fpage>378</fpage><lpage>382</lpage><pub-id pub-id-type="doi">10.1037/h0031619</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chlap</surname><given-names>P</given-names> </name><name name-style="western"><surname>Min</surname><given-names>H</given-names> </name><name 
name-style="western"><surname>Vandenberg</surname><given-names>N</given-names> </name><name name-style="western"><surname>Dowling</surname><given-names>J</given-names> </name><name name-style="western"><surname>Holloway</surname><given-names>L</given-names> </name><name name-style="western"><surname>Haworth</surname><given-names>A</given-names> </name></person-group><article-title>A review of medical image data augmentation techniques for deep learning applications</article-title><source>J Med Imaging Radiat Oncol</source><year>2021</year><month>08</month><volume>65</volume><issue>5</issue><fpage>545</fpage><lpage>563</lpage><pub-id pub-id-type="doi">10.1111/1754-9485.13261</pub-id><pub-id pub-id-type="medline">34145766</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kasajima</surname><given-names>I</given-names> </name></person-group><article-title>Measuring plant colors</article-title><source>Plant Biotechnol (Tokyo)</source><year>2019</year><volume>36</volume><issue>2</issue><fpage>63</fpage><lpage>75</lpage><pub-id pub-id-type="doi">10.5511/plantbiotechnology.19.0322a</pub-id><pub-id pub-id-type="medline">31768106</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Huang</surname><given-names>L</given-names> </name><name name-style="western"><surname>Qin</surname><given-names>J</given-names> </name><name name-style="western"><surname>Zhou</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Zhu</surname><given-names>F</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>L</given-names> </name><name name-style="western"><surname>Shao</surname><given-names>L</given-names> </name></person-group><article-title>Normalization techniques in training DNNs: 
methodology, analysis and application</article-title><source>IEEE Trans Pattern Anal Mach Intell</source><year>2023</year><month>08</month><volume>45</volume><issue>8</issue><fpage>10173</fpage><lpage>10196</lpage><pub-id pub-id-type="doi">10.1109/TPAMI.2023.3250241</pub-id><pub-id pub-id-type="medline">37027763</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hosny</surname><given-names>KM</given-names> </name><name name-style="western"><surname>Kassem</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Fouad</surname><given-names>MM</given-names> </name></person-group><article-title>Classification of skin lesions into seven classes using transfer learning with AlexNet</article-title><source>J Digit Imaging</source><year>2020</year><month>10</month><volume>33</volume><issue>5</issue><fpage>1325</fpage><lpage>1334</lpage><pub-id pub-id-type="doi">10.1007/s10278-020-00371-9</pub-id><pub-id pub-id-type="medline">32607904</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Khan</surname><given-names>A</given-names> </name><name name-style="western"><surname>Khan</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ullah</surname><given-names>M</given-names> </name><name name-style="western"><surname>Alam</surname><given-names>MM</given-names> </name><name name-style="western"><surname>Bangash</surname><given-names>JI</given-names> </name><name name-style="western"><surname>Suud</surname><given-names>MM</given-names> </name></person-group><article-title>A computational classification method of breast cancer images using the VGGNet model</article-title><source>Front Comput Neurosci</source><year>2022</year><volume>16</volume><fpage>1001803</fpage><pub-id 
pub-id-type="doi">10.3389/fncom.2022.1001803</pub-id><pub-id pub-id-type="medline">36405784</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nayak</surname><given-names>T</given-names> </name><name name-style="western"><surname>Chadaga</surname><given-names>K</given-names> </name><name name-style="western"><surname>Sampathila</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Deep learning based detection of monkeypox virus using skin lesion images</article-title><source>Med Nov Technol Devices</source><year>2023</year><month>06</month><volume>18</volume><fpage>100243</fpage><pub-id pub-id-type="doi">10.1016/j.medntd.2023.100243</pub-id><pub-id pub-id-type="medline">37293134</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wu</surname><given-names>J</given-names> </name><name name-style="western"><surname>Hu</surname><given-names>W</given-names> </name><name name-style="western"><surname>Wen</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Tu</surname><given-names>W</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>X</given-names> </name></person-group><article-title>Skin lesion classification using densely connected convolutional networks with attention residual learning</article-title><source>Sensors (Basel)</source><year>2020</year><month>12</month><day>10</day><volume>20</volume><issue>24</issue><fpage>7080</fpage><pub-id pub-id-type="doi">10.3390/s20247080</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sayar</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>A&#x015F;k&#x0131;n Ceran</surname><given-names>M</given-names> </name><name name-style="western"><surname>Demir</surname><given-names>A</given-names> </name></person-group><article-title>Determining the pressure injury and staging knowledge of nurses at a hospital in Turkey</article-title><source>J Tissue Viability</source><year>2022</year><month>11</month><volume>31</volume><issue>4</issue><fpage>735</fpage><lpage>740</lpage><pub-id pub-id-type="doi">10.1016/j.jtv.2022.08.004</pub-id><pub-id pub-id-type="medline">36041891</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jiang</surname><given-names>M</given-names> </name><name name-style="western"><surname>Ma</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Guo</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Using machine learning technologies in pressure injury management: systematic review</article-title><source>JMIR Med Inform</source><year>2021</year><month>03</month><day>10</day><volume>9</volume><issue>3</issue><fpage>e25704</fpage><pub-id pub-id-type="doi">10.2196/25704</pub-id><pub-id pub-id-type="medline">33688846</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ay</surname><given-names>B</given-names> </name><name name-style="western"><surname>Tasar</surname><given-names>B</given-names> </name><name name-style="western"><surname>Utlu</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Ay</surname><given-names>K</given-names> </name><name name-style="western"><surname>Aydin</surname><given-names>G</given-names> </name></person-group><article-title>Deep transfer learning-based visual classification of pressure injuries 
stages</article-title><source>Neural Comput Applic</source><year>2022</year><month>09</month><volume>34</volume><issue>18</issue><fpage>16157</fpage><lpage>16168</lpage><pub-id pub-id-type="doi">10.1007/s00521-022-07274-6</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="preprint"><person-group person-group-type="author"><name name-style="western"><surname>Fergus</surname><given-names>P</given-names> </name><name name-style="western"><surname>Chalmers</surname><given-names>C</given-names> </name><name name-style="western"><surname>Henderson</surname><given-names>W</given-names> </name><name name-style="western"><surname>Roberts</surname><given-names>D</given-names> </name><name name-style="western"><surname>Waraich</surname><given-names>A</given-names> </name></person-group><article-title>Pressure ulcer categorization using deep learning: a clinical trial to evaluate model performance</article-title><source>arXiv</source><comment>Preprint posted online on Mar 7, 2022</comment><pub-id pub-id-type="doi">10.48550/arXiv.2203.06248</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lau</surname><given-names>CH</given-names> </name><name name-style="western"><surname>Yu</surname><given-names>KHO</given-names> </name><name name-style="western"><surname>Yip</surname><given-names>TF</given-names> </name><etal/></person-group><article-title>An artificial intelligence-enabled smartphone app for real-time pressure injury assessment</article-title><source>Front Med Technol</source><year>2022</year><volume>4</volume><fpage>905074</fpage><pub-id pub-id-type="doi">10.3389/fmedt.2022.905074</pub-id><pub-id pub-id-type="medline">36212608</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Shahinfar</surname><given-names>S</given-names> </name><name name-style="western"><surname>Meek</surname><given-names>P</given-names> </name><name name-style="western"><surname>Falzon</surname><given-names>G</given-names> </name></person-group><article-title>&#x201C;How many images do I need?&#x201D; Understanding how sample size per class affects deep learning model performance metrics for balanced designs in autonomous wildlife monitoring</article-title><source>Ecol Inform</source><year>2020</year><month>05</month><volume>57</volume><fpage>101085</fpage><pub-id pub-id-type="doi">10.1016/j.ecoinf.2020.101085</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sampaio</surname><given-names>CS</given-names> </name><name name-style="western"><surname>Atria</surname><given-names>PJ</given-names> </name><name name-style="western"><surname>Hirata</surname><given-names>R</given-names> </name><name name-style="western"><surname>Jorquera</surname><given-names>G</given-names> </name></person-group><article-title>Variability of color matching with different digital photography techniques and a gray reference card</article-title><source>J Prosthet Dent</source><year>2019</year><month>02</month><volume>121</volume><issue>2</issue><fpage>333</fpage><lpage>339</lpage><pub-id pub-id-type="doi">10.1016/j.prosdent.2018.03.009</pub-id><pub-id pub-id-type="medline">30093117</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kim</surname><given-names>J</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>C</given-names> </name><name name-style="western"><surname>Choi</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Augmented decision-making in 
wound care: evaluating the clinical utility of a deep-learning model for pressure injury staging</article-title><source>Int J Med Inform</source><year>2023</year><month>12</month><volume>180</volume><fpage>105266</fpage><pub-id pub-id-type="doi">10.1016/j.ijmedinf.2023.105266</pub-id><pub-id pub-id-type="medline">37866277</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liao</surname><given-names>T</given-names> </name><name name-style="western"><surname>Li</surname><given-names>L</given-names> </name><name name-style="western"><surname>Ouyang</surname><given-names>R</given-names> </name><etal/></person-group><article-title>Classification of asymmetry in mammography via the DenseNet convolutional neural network</article-title><source>Eur J Radiol Open</source><year>2023</year><month>12</month><volume>11</volume><fpage>100502</fpage><pub-id pub-id-type="doi">10.1016/j.ejro.2023.100502</pub-id><pub-id pub-id-type="medline">37448557</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sun</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Xue</surname><given-names>B</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>M</given-names> </name><name name-style="western"><surname>Yen</surname><given-names>GG</given-names> </name></person-group><article-title>Completely automated CNN architecture design based on blocks</article-title><source>IEEE Trans Neural Netw Learn Syst</source><year>2020</year><month>04</month><volume>31</volume><issue>4</issue><fpage>1242</fpage><lpage>1254</lpage><pub-id pub-id-type="doi">10.1109/TNNLS.2019.2919608</pub-id><pub-id pub-id-type="medline">31247572</pub-id></nlm-citation></ref></ref-list></back></article>