<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Med Inform</journal-id><journal-id journal-id-type="publisher-id">medinform</journal-id><journal-id journal-id-type="index">7</journal-id><journal-title>JMIR Medical Informatics</journal-title><abbrev-journal-title>JMIR Med Inform</abbrev-journal-title><issn pub-type="epub">2291-9694</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v13i1e80351</article-id><article-id pub-id-type="doi">10.2196/80351</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Rapid Liver Fibrosis Evaluation Using the UNet-ResNet50-32 &#x00D7; 4d Model in Magnetic Resonance Elastography: Retrospective Study</article-title></title-group><contrib-group><contrib contrib-type="author"><name name-style="western"><surname>Su</surname><given-names>Pei-Yuan</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Shih</surname><given-names>Han-Jie</given-names></name><degrees>BS</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Xu</surname><given-names>Jia-Lang</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff4">4</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Internal Medicine, Division of Gastroenterology, 
Changhua Christian Hospital</institution><addr-line>Changhua</addr-line><country>Taiwan</country></aff><aff id="aff2"><institution>Department of Post-Baccalaureate Medicine, College of Medicine, National Chung Hsing University</institution><addr-line>Taichung</addr-line><country>Taiwan</country></aff><aff id="aff3"><institution>Department of Medical Imaging, Changhua Christian Hospital</institution><addr-line>Changhua</addr-line><country>Taiwan</country></aff><aff id="aff4"><institution>Department of Applied Statistics, National Taichung University of Science and Technology</institution><addr-line>No. 129, Section 3, Sanmin Road, North District</addr-line><addr-line>Taichung</addr-line><country>Taiwan</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Coristine</surname><given-names>Andrew</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Ozkaya</surname><given-names>Efe</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Chang</surname><given-names>Fu-Min</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Shen</surname><given-names>Jun-Hong</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Jia-Lang Xu, PhD, Department of Applied Statistics, National Taichung University of Science and Technology, No. 
129, Section 3, Sanmin Road, North District, Taichung, 404336, Taiwan, 886 4-2219-6076, 886 4-2219-6330; <email>jlxu.academy@gmail.com</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>20</day><month>10</month><year>2025</year></pub-date><volume>13</volume><elocation-id>e80351</elocation-id><history><date date-type="received"><day>09</day><month>07</month><year>2025</year></date><date date-type="rev-recd"><day>28</day><month>09</month><year>2025</year></date><date date-type="accepted"><day>28</day><month>09</month><year>2025</year></date></history><copyright-statement>&#x00A9; Pei-Yuan Su, Han-Jie Shih, Jia-Lang Xu. Originally published in JMIR Medical Informatics (<ext-link ext-link-type="uri" xlink:href="https://medinform.jmir.org">https://medinform.jmir.org</ext-link>), 20.10.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Informatics, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://medinform.jmir.org/">https://medinform.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://medinform.jmir.org/2025/1/e80351"/><abstract><sec><title>Background</title><p>Liver fibrosis is a pathological outcome of chronic liver injury and a hallmark of multiple chronic liver diseases. 
Magnetic resonance elastography (MRE) provides a non-invasive modality for evaluating the severity of liver fibrosis.</p></sec><sec><title>Objective</title><p>This study aimed to develop and evaluate deep learning&#x2013;based segmentation models for the automated assessment of liver fibrosis using MRE images, with a focus on comparing the performance of a conventional U-Net model and a UNet-ResNet50&#x2212;32 &#x00D7; 4d architecture model.</p></sec><sec sec-type="methods"><title>Methods</title><p>A retrospective analysis was conducted on 319 patients enrolled between January 2018 and December 2020. MRE images were processed and segmented using two U-Net&#x2013;based models. Model performance was assessed through correlation coefficients, intersection over union (IoU), and additional segmentation metrics.</p></sec><sec sec-type="results"><title>Results</title><p>The UNet-ResNet50&#x2212;32 &#x00D7; 4d model demonstrated strong agreement with ground truth annotations, achieving correlation coefficients of 0.952 in the training phase and 0.943 in the validation phase, along with a Dice score of 85.68%, confirming its high segmentation accuracy.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>The UNet-ResNet50&#x2212;32 &#x00D7; 4d model exhibited robust performance and may serve as a reliable tool for the rapid and accurate assessment of liver fibrosis severity. 
The integration of automated segmentation into MRE analysis has the potential to improve clinical workflows and support timely decision-making in the management of chronic liver disease.</p></sec></abstract><kwd-group><kwd>liver fibrosis</kwd><kwd>image segmentation</kwd><kwd>MRI elastography</kwd><kwd>magnetic resonance elastography</kwd><kwd>automated segmentation</kwd><kwd>timely decision-making</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><sec id="s1-1"><title>Research Background and Motivations</title><p>Liver fibrosis is a progressive process that culminates in cirrhosis, a condition associated with severe complications such as ascites, esophageal varices, and hepatic encephalopathy, which significantly shorten life expectancy [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. To better stratify disease severity and guide management, fibrosis is commonly staged into five categories (F0&#x2013;F4). F0 represents the absence of fibrosis, while F1&#x2013;F3 correspond to progressive but non-cirrhotic stages of chronic liver disease, during which timely intervention can slow or even reverse disease progression. F4 indicates established cirrhosis, marking a critical threshold where the risk of decompensation, hepatocellular carcinoma, and mortality rises sharply. This staging framework not only provides prognostic insights but also plays a pivotal role in therapeutic decision-making, surveillance strategies, and patient counseling. The progression of liver fibrosis is slow and takes years to progress from mild liver fibrosis to cirrhosis. If liver fibrosis is diagnosed early using quicker and more convenient tools, there is a chance to prevent further deterioration into cirrhosis [<xref ref-type="bibr" rid="ref3">3</xref>]. Currently, there are many tools for diagnosing liver fibrosis, with liver biopsy being the most accurate. 
However, due to the risk of bleeding, it is less commonly used [<xref ref-type="bibr" rid="ref4">4</xref>]. There are many non-invasive testing methods, such as ultrasound elastography and magnetic resonance elastography (MRE), which have a high capability for diagnosing liver fibrosis [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref6">6</xref>]. MRE requires manual circling of images for interpretative reading, which increases the time for doctors to interpret reports [<xref ref-type="bibr" rid="ref7">7</xref>]. Therefore, using image segmentation technology could help reduce the time for interpreting MRE for liver fibrosis and improve accuracy.</p><p>Artificial intelligence (AI) has been widely investigated in the medical field, with numerous applications across disease prediction, diagnosis, and clinical decision support. For instance, computed tomography (CT) imaging has shown strong potential in predicting cholangiocarcinoma recurrence [<xref ref-type="bibr" rid="ref8">8</xref>], while machine learning&#x2013;based image analysis methods have been applied for diabetic foot evaluation [<xref ref-type="bibr" rid="ref9">9</xref>]. Convolutional neural networks (CNNs) have been employed to predict clinical outcomes in patients with stroke [<xref ref-type="bibr" rid="ref10">10</xref>], and a YOLOv8 model has demonstrated a high accuracy in early lung cancer detection [<xref ref-type="bibr" rid="ref11">11</xref>]. Similarly, combining chest X-rays with clinical features has yielded favorable area under the curve performance in osteoporosis screening [<xref ref-type="bibr" rid="ref12">12</xref>]. In health care operations, machine learning models have also been developed to predict emergency department patient flow, effectively estimating both hourly and daily visit volumes [<xref ref-type="bibr" rid="ref13">13</xref>]. 
Moreover, clinical decision support systems have been explored for prenatal abnormality diagnosis and ultrasound applications, though such studies have yet to incorporate maternal or fetal data during pregnancy [<xref ref-type="bibr" rid="ref14">14</xref>]. CNNs have been widely applied in liver tumor classification, while diverse architectures such as U-Net, UNet++, Residual Networks (ResNet), SegNet, and fully convolutional networks have been employed for semantic segmentation tasks [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref15">15</xref>-<xref ref-type="bibr" rid="ref18">18</xref>]. However, despite this progress, only one previous study has reported the application of CNNs for MRE measurement [<xref ref-type="bibr" rid="ref19">19</xref>]. By integrating the strengths of U-Net and ResNet, our Unet-ResNet model achieved superior segmentation performance and training stability, while maintaining strong agreement with manual evaluation.</p><p>Image segmentation represents another important domain of AI applications, whereby algorithms cluster elements of a similar nature into coherent segments [<xref ref-type="bibr" rid="ref20">20</xref>]. This technique has been increasingly applied in medical imaging, including ultrasound imaging [<xref ref-type="bibr" rid="ref21">21</xref>], CT scans [<xref ref-type="bibr" rid="ref22">22</xref>], magnetic resonance imaging (MRI) [<xref ref-type="bibr" rid="ref23">23</xref>], and X-ray imaging [<xref ref-type="bibr" rid="ref24">24</xref>]. In liver-related applications, segmentation methods such as real-time liver ultrasound segmentation [<xref ref-type="bibr" rid="ref17">17</xref>] can greatly assist physicians in diagnosis and treatment planning [<xref ref-type="bibr" rid="ref25">25</xref>], while CT-based segmentation is useful for localizing liver tumors [<xref ref-type="bibr" rid="ref26">26</xref>]. 
Advanced approaches using deep CNNs [<xref ref-type="bibr" rid="ref27">27</xref>] and 3D deeply supervised networks [<xref ref-type="bibr" rid="ref16">16</xref>] have further improved automated liver segmentation performance. Researchers have compared U-Net and V-Net architectures to assess their effectiveness in the segmentation of microcalcifications [<xref ref-type="bibr" rid="ref28">28</xref>].</p></sec><sec id="s1-2"><title>Research Objectives</title><p>The aim of this study is to use image segmentation technology for the computational interpretation of MRE, enabling AI to automatically segment MRE images, accurately identify regions of interest, quantify liver fibrosis levels, and apply this approach to both training and validation cohorts. Early and reliable assessment of liver fibrosis is essential for timely clinical decision-making, yet conventional manual labeling of MRE images is time-consuming, operator-dependent, and prone to variability. To address these challenges, this study further investigates whether advanced architectures such as UNet-ResNet50&#x2212;32 &#x00D7; 4d can achieve superior predictive performance compared with the traditional U-Net model.</p></sec></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Study Overview</title><p>An overview of the automated workflow developed in this study is shown in <xref ref-type="fig" rid="figure1">Figure 1</xref>. This study incorporates an automated process in which patients undergo MRE examinations, and the imaging data are subsequently uploaded to the picture archiving and communication system. Once the upload is completed, the segmentation model is automatically triggered. During the validation phase, pixel values of the target regions are obtained directly from the Digital Imaging and Communications in Medicine (DICOM) images in the picture archiving and communication system, as interpreted by experienced radiologists. 
Since there are discrepancies between pixel values in DICOM format and those converted to JPEG, this study relies solely on the pixel values displayed in the original DICOM files for analysis. These values are automatically extracted using medical image processing libraries that read directly from the DICOM metadata and image matrix. After segmentation, the predicted masks and corresponding pixel values are overlaid on the original images and provided to radiologists to support clinical evaluation and decision-making.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Overview of the automated workflow for liver fibrosis assessment using MRE confidence maps, illustrating image preprocessing, segmentation with the UNet-ResNet50&#x2212;32&#x00D7;4d model, and quantitative analysis for clinical application. DICOM: Digital Imaging and Communications in Medicine; MRE: magnetic resonance elastography; PACS: picture archiving and communication system.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e80351_fig01.png"/></fig></sec><sec id="s2-2"><title>Ethical Considerations</title><p>The study was approved by the Institutional Review Board of Changhua Christian Hospital (approval no.: 210132). The Institutional Review Board waived the need for informed consent considering the retrospective nature of data collected. This study implemented stringent measures to protect the privacy of all participants by anonymizing all collected data to remove any personally identifiable information.</p></sec><sec id="s2-3"><title>Patients</title><p>This retrospective study collected patient data from January 2018 to December 2020. Patients were eligible if they underwent MRI for clinical indications and MRE was performed as part of the MRI protocol. 
Additional inclusion criteria were age greater than 20 years and the availability of demographic information (age and gender). Exclusion criteria consisted of incomplete imaging or missing demographic data. Based on these criteria, a total of 320 patients were initially identified, and 1 patient was excluded due to incomplete data, resulting in a final cohort of 319 patients included in the analysis. To ensure the broad applicability and usability of the proposed model, no further stratification was made according to the presence of chronic liver disease or fibrosis.</p></sec><sec id="s2-4"><title>MRE</title><p>MRI was performed with a 1.5-Tesla Aera magnet system by Siemens AG, equipped with a 16-channel phased-array body coil. The process involved a specialized MRE setup, integral to which was an acoustic driver system by Resoundant. The technical details of the MRE imaging sequence were as follows: a repetition time of 50 ms and an echo time of 22.7 ms, a flip angle of 25 degrees, and a bandwidth of 260 Hz/pixel. Additionally, the sequence settings included a hydrogen resonance frequency of 63.5 MHz, an acquisition matrix of 256 &#x00D7; 64, a section thickness of 5 mm, and a field of view of 400 &#x00D7; 400 mm<sup>2</sup>. For each patient, four to five confidence maps were automatically generated and post-processed directly on the MRI scanner workstation using the integrated MRE software on the Siemens Syngo MR VE11 system (MAGNETOM Aera, Skyra, and Avantofit; Siemens Healthineers), demonstrating adequate wave amplitudes in specific regions. Manual liver stiffness measurements were performed by an expert, who delineated the regions of interest on the confidence maps and calculated the stiffness values [<xref ref-type="bibr" rid="ref29">29</xref>]. The stage of liver fibrosis was classified into four categories based on criteria developed at the same institute as this study [<xref ref-type="bibr" rid="ref30">30</xref>]. 
Significant fibrosis was defined as stage F2 according to the METAVIR scoring system, corresponding to MRE values &#x2265;2.8 kPa.</p></sec><sec id="s2-5"><title>MRE Image Labeling</title><p>For this study, the model training was conducted using data from 92 patients, all of whom had confidence maps generated from MRE. The annotations for these training images were meticulously created by a gastroenterologist, ensuring the reliability and precision of the ground truth used in model development.</p></sec><sec id="s2-6"><title>Statistical Analysis</title><p>The <italic>t</italic> test was used for comparison of continuous variables of baseline characteristics. Continuous variables were shown as mean (SD). The Pearson correlation coefficient was used to measure the correlation of the two MRE measurements by manual and automatic methods. All statistical analyses were performed using SPSS version 22.0 (IBM Corp.), with two-tailed <italic>P</italic> values &#x003C;.05 indicating statistical significance.</p></sec><sec id="s2-7"><title>Model Training</title><p>The MRE images used in this study were acquired with a window center of 400 and a window width of 800, with an original resolution of 256&#x00D7;204 pixels. Since convolutional operations in the U-Net architecture require adequate spatial information for effective multi-scale feature extraction, all images were resized to 512&#x00D7;512 pixels using bilinear interpolation prior to model training. This resizing step was performed to preserve structural details and improve segmentation accuracy. For liver fibrosis segmentation, a hybrid deep learning model was implemented by combining U-Net with a ResNet50&#x2212;32 &#x00D7; 4d encoder, leveraging its capacity to extract multi-scale contextual features while maintaining spatial resolution. 
Model optimization aimed to enhance both training efficiency and segmentation performance through systematic adjustment of key hyperparameters, with a comprehensive tuning strategy applied across a range of values, as summarized in <xref ref-type="table" rid="table1">Table 1</xref>. To address the pixel-level classification task, the model employed the Binary Cross-Entropy with Logits Loss as shown in equation (1).</p><disp-formula id="E1"><label>(1)</label><mml:math id="eqn1"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mi>L</mml:mi><mml:mo>=</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>N</mml:mi></mml:mfrac><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:munderover><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mi>log</mml:mi><mml:mo>&#x2061;</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>p</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mo>+</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mi>log</mml:mi><mml:mo>&#x2061;</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>p</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mstyle></mml:mstyle></mml:mrow></mml:mstyle></mml:math></disp-formula><p>&#x2003;Where <inline-formula><mml:math id="ieqn1"><mml:msub><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> denotes the true label of the <inline-formula><mml:math 
id="ieqn2"><mml:mi>i</mml:mi></mml:math></inline-formula>-th sample, <inline-formula><mml:math id="ieqn3"><mml:msub><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> represents the predicted probability that the sample belongs to the positive class, and <inline-formula><mml:math id="ieqn4"><mml:mi>N</mml:mi></mml:math></inline-formula> indicates the total number of samples.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Hyperparameter tuning strategy and optimal settings for the model.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Hyperparameter</td><td align="left" valign="bottom">Values</td></tr></thead><tbody><tr><td align="left" valign="top">Batch size</td><td align="left" valign="top">32</td></tr><tr><td align="left" valign="top">Loss</td><td align="left" valign="top">Binary Cross-Entropy with Logits Loss</td></tr><tr><td align="left" valign="top">Optimizer</td><td align="left" valign="top">Stochastic Gradient Descent</td></tr><tr><td align="left" valign="top">Learning rate</td><td align="left" valign="top">3&#x00D7;10&#x207B;&#x2076;&#x2013;3&#x00D7;10&#x207B;&#x00B2;</td></tr><tr><td align="left" valign="top">Weight decay</td><td align="left" valign="top">0&#x2010;0.01</td></tr><tr><td align="left" valign="top">Momentum</td><td align="left" valign="top">0.80&#x2010;0.99</td></tr></tbody></table></table-wrap></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Cohort Characteristics</title><p>A total of 319 patients were enrolled in the study and divided into two cohorts, with 91 patients assigned to the training group and 228 patients allocated to the testing group. The baseline characteristics of the two cohorts are summarized in <xref ref-type="table" rid="table2">Table 2</xref>. 
The mean (SD) age was 57.2 (12.4) years in the training group and 52.6 (12.3) years in the testing group. There were no statistically significant differences in age, gender distribution, height, weight, or BMI between the two groups, as all <italic>P</italic> values were greater than .05. Similarly, the mean (SD) MRE stiffness values obtained by manual measurement were 4.51 (2.85) kPa in the training group and 3.69 (2.26) kPa in the testing group, with a <italic>P</italic> value of .09. The mean (SD) automated measurement values were 4.04 (1.93) kPa in the training group and 3.69 (2.28) kPa in the testing group with a <italic>P</italic> value of .41, also showing no significant difference. In contrast, a significant difference was observed in the proportion of patients with clinically significant fibrosis stage equal to or greater than F2. Based on manual MRE assessment, 76% (69/91) of patients in the training group had fibrosis stage F2 or higher compared with 52% (118/228) in the testing group, with a <italic>P</italic> value of less than .001. Automated MRE analysis identified 75% (68/91) of patients in the training group and 50% (114/228) in the testing group with fibrosis stage F2 or higher, also with a <italic>P</italic> value of less than .001. 
These findings confirm the consistency between manual and automated staging, while indicating that the prevalence of significant fibrosis was higher in the training cohort.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>The baseline characteristics of the training group and testing group.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Characteristics</td><td align="left" valign="bottom">Training group (n=91)</td><td align="left" valign="bottom">Testing group (n=228)</td><td align="left" valign="bottom"><italic>P</italic> value</td></tr></thead><tbody><tr><td align="left" valign="top">Age, years, mean (SD)</td><td align="left" valign="top">57.2 (12.4)</td><td align="left" valign="top">52.6 (12.3)</td><td align="left" valign="top">.77</td></tr><tr><td align="left" valign="top">Gender, male (%)</td><td align="left" valign="top">48 (53)</td><td align="left" valign="top">137 (60)</td><td align="left" valign="top">.23</td></tr><tr><td align="left" valign="top">Height, m, mean (SD)</td><td align="left" valign="top">1.62 (0.08)</td><td align="left" valign="top">1.65 (0.08)</td><td align="left" valign="top">.97</td></tr><tr><td align="left" valign="top">Weight, kg, mean (SD)</td><td align="left" valign="top">65.8 (13.2)</td><td align="left" valign="top">67 (12.5)</td><td align="left" valign="top">.89</td></tr><tr><td align="left" valign="top">BMI, kg/m<sup>2</sup>, mean (SD)</td><td align="left" valign="top">25.1 (4.2)</td><td align="left" valign="top">24.6 (3.7)</td><td align="left" valign="top">.21</td></tr><tr><td align="left" valign="top">MRE<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup> (manual), kPa, mean (SD)</td><td align="left" valign="top">4.51 (2.85)</td><td align="left" valign="top">3.69 (2.26)</td><td align="left" valign="top">.09</td></tr><tr><td align="left" valign="top">MRE (automatic), kPa, mean (SD)</td><td align="left" valign="top">4.04 (1.93)</td><td 
align="left" valign="top">3.69 (2.28)</td><td align="left" valign="top">.41</td></tr><tr><td align="left" valign="top">Fibrosis stage (&#x2265; F2<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup>) (manual), n (%)</td><td align="left" valign="top">69 (76)</td><td align="left" valign="top">118 (52)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Fibrosis stage (&#x2265; F2<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup>) (automatic), n (%)</td><td align="left" valign="top">68 (75)</td><td align="left" valign="top">114 (50)</td><td align="left" valign="top">&#x003C;.001</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>MRE: magnetic resonance elastography.</p></fn><fn id="table2fn2"><p><sup>b</sup>Liver fibrosis severity was determined using MRE, with stage F2 defined as MRE &#x2265;2.8 kPa [<xref ref-type="bibr" rid="ref30">30</xref>].</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-2"><title>Automatic Labeling Process</title><p><xref ref-type="fig" rid="figure2">Figure 2</xref> illustrates the template for the auto-labeling process generated by the UNet-ResNet50&#x2212;32 &#x00D7; 4d algorithm, a widely validated and robust deep learning architecture specifically designed for semantic segmentation tasks. The auto-labeling template demonstrates the effectiveness of the model in accurately identifying and segmenting key regions of interest within the MRE confidence maps, aligning closely with expert annotations.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Overall architecture of the proposed UNet-ResNet50&#x2212;32&#x00D7;4d model for automated liver segmentation and fibrosis staging using MRE confidence maps. 
MRE: magnetic resonance elastography.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e80351_fig02.png"/></fig></sec><sec id="s3-3"><title>Experimental Environment</title><p>All experiments in this study were conducted on a high-performance workstation equipped with an NVIDIA RTX 4090 GPU, an Intel Core i9-13900K CPU, and 128 GB of RAM. The deep learning models were implemented using PyTorch with CUDA 11.8 (torch 2.1.0+ cu118), enabling accelerated training and inference.</p></sec><sec id="s3-4"><title>Parameters Setting</title><p><xref ref-type="table" rid="table3">Table 3</xref> presents the optimal parameter values identified in this study through training with the UNet-ResNet50&#x2212;32 &#x00D7; 4d algorithm, which include a learning rate of 0.005, momentum of 0.982, and weight decay of 4.457e-06.</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Optimal hyperparameters derived from model training.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Parameters</td><td align="left" valign="bottom">Value</td></tr></thead><tbody><tr><td align="left" valign="top">Epoch</td><td align="left" valign="top">32</td></tr><tr><td align="left" valign="top">Learning rate</td><td align="left" valign="top">0.005</td></tr><tr><td align="left" valign="top">Momentum</td><td align="left" valign="top">0.982</td></tr><tr><td align="left" valign="top">Weight decay</td><td align="left" valign="top">4.457e-06</td></tr></tbody></table></table-wrap></sec><sec id="s3-5"><title>Performance Evaluation</title><p>A comparison of the two models using the optimal parameters obtained from <xref ref-type="table" rid="table3">Table 3</xref> indicates that the UNet-ResNet50&#x2212;32 &#x00D7; 4d model achieved better predictive performance on the test dataset, whereas the standard U-Net model failed to produce any meaningful predictions. 
The average Dice coefficient, intersection over union (IoU), and F1-score for the UNet-ResNet50&#x2212;32 &#x00D7; 4d model were 85.68%, 75.80%, and 85.68%, respectively; the corresponding values for the U-Net model were 82.59%, 75.92%, and 82.59%, respectively. The slightly lower IoU may be attributed to the presence of outliers during pixel-wise evaluation, which could have affected the overall segmentation accuracy. The segmentation performance was quantitatively assessed using the Dice coefficient, IoU, and F1-score, as shown in equations (2)&#x2013;(4).</p><disp-formula id="E2"><label>(2)</label><mml:math id="eqn2"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mi>D</mml:mi><mml:mi>i</mml:mi><mml:mi>c</mml:mi><mml:mi>e</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>2</mml:mn><mml:mrow><mml:mo>|</mml:mo><mml:mi>P</mml:mi><mml:mo>&#x2229;</mml:mo><mml:mi>G</mml:mi><mml:mo>|</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:mo>|</mml:mo><mml:mi>P</mml:mi><mml:mo>|</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mo>|</mml:mo><mml:mi>G</mml:mi><mml:mo>|</mml:mo></mml:mrow></mml:mrow></mml:mfrac></mml:mstyle></mml:mrow></mml:mstyle></mml:math></disp-formula><disp-formula id="E3"><label>(3)</label><mml:math id="eqn3"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mi>I</mml:mi><mml:mi>o</mml:mi><mml:mi>U</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mo>|</mml:mo><mml:mi>P</mml:mi><mml:mo>&#x2229;</mml:mo><mml:mi>G</mml:mi><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mo>|</mml:mo><mml:mi>P</mml:mi><mml:mo>&#x222A;</mml:mo><mml:mi>G</mml:mi><mml:mo>|</mml:mo></mml:mrow></mml:mfrac></mml:mstyle></mml:mrow></mml:mstyle></mml:math></disp-formula><disp-formula id="E4"><label>(4)</label><mml:math id="eqn4"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mstyle displaystyle="true"
scriptlevel="0"><mml:mi>F</mml:mi><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>2</mml:mn><mml:mo>&#x2217;</mml:mo><mml:mi>P</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mo>&#x2217;</mml:mo><mml:mi>R</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi></mml:mrow><mml:mrow><mml:mi>P</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mo>+</mml:mo><mml:mi>R</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:mfrac></mml:mstyle></mml:mrow></mml:mstyle></mml:math></disp-formula><p><xref ref-type="fig" rid="figure3">Figure 3</xref> illustrates the segmentation results for liver fibrosis imaging. Overall, the figure underscores the enhanced performance of the proposed model in automated segmentation of liver fibrosis imaging and its potential clinical applicability.</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Comparative segmentation performance of conventional U-Net and UNet-ResNet50&#x2212;32&#x00D7;4d architectures on MRE confidence maps for liver fibrosis assessment. Panel (A) shows the original MRE confidence map, while panel (B) presents the segmentation outcome generated by the conventional U-Net model, which delineates the major hepatic region but demonstrates limitations in boundary refinement and structural detail. In contrast, panel (C) depicts the result obtained using the UNet-ResNet50&#x2212;32&#x00D7;4d model, which more accurately captures hepatic contours and structural features, highlighting its superior capability in feature extraction and region identification. 
MRE: magnetic resonance elastography.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e80351_fig03.png"/></fig><p><xref ref-type="table" rid="table4">Table 4</xref> presents the performance of the proposed UNet-ResNet50&#x2212;32&#x00D7;4d model further evaluated across different fibrosis stages. The segmentation accuracy remained consistently high in the early stages F0-F1, with Dice scores exceeding 87% and IoU values around 78%. Performance was slightly reduced in the intermediate stage F2 and more prominently in stage F3, where Dice and IoU decreased to 80.54% and 70.49%, respectively. This decline may reflect the increased heterogeneity and irregularity of fibrosis distribution in advanced disease. Notably, the model regained relatively stable performance in stage F4, with Dice and IoU values of 84.00% and 73.14%, respectively. These findings suggest that while the model demonstrates robust segmentation across fibrosis stages, challenges remain in capturing complex tissue patterns in stage F3.</p><p><xref ref-type="fig" rid="figure4">Figure 4</xref> presents the correlation between MRE values obtained from the automated segmentation model and those measured manually by an expert gastroenterologist in the testing cohort. 
The analysis demonstrated a Pearson correlation coefficient of 0.943, confirming a strong positive linear relationship between the two approaches, even when applied to unseen data.</p><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>Segmentation performance of the UNet-ResNet50&#x2212;32&#x00D7;4d model across different fibrosis stages.</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Fibrosis stage</td><td align="left" valign="bottom">Dice (%)</td><td align="left" valign="bottom">Intersection over union (%)</td><td align="left" valign="bottom">F1-score (%)</td></tr></thead><tbody><tr><td align="left" valign="top">F0</td><td align="left" valign="top">87.47</td><td align="left" valign="top">78.26</td><td align="left" valign="top">87.47</td></tr><tr><td align="left" valign="top">F1</td><td align="left" valign="top">87.98</td><td align="left" valign="top">78.74</td><td align="left" valign="top">87.98</td></tr><tr><td align="left" valign="top">F2</td><td align="left" valign="top">84.11</td><td align="left" valign="top">73.85</td><td align="left" valign="top">84.11</td></tr><tr><td align="left" valign="top">F3</td><td align="left" valign="top">80.54</td><td align="left" valign="top">70.49</td><td align="left" valign="top">80.54</td></tr><tr><td align="left" valign="top">F4</td><td align="left" valign="top">84.00</td><td align="left" valign="top">73.14</td><td align="left" valign="top">84.00</td></tr></tbody></table></table-wrap><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Correlation between automated segmentation&#x2013;derived MRE values and manual expert measurements in the testing group. 
MRE: magnetic resonance elastography.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e80351_fig04.png"/></fig></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>Automated segmentation using the AI-based model for MRE measurement proved to be both reliable and effective when compared with manual segmentation. The strong correlation and reproducibility observed between the automated and manual approaches highlight the potential of this tool as a valuable aid in clinical practice. In this study, we further evaluated and compared the performance of a standard U-Net with a UNet-ResNet50&#x2212;32 &#x00D7; 4d architecture for medical image segmentation. The findings clearly demonstrated that the UNet-ResNet50&#x2212;32 &#x00D7; 4d model substantially outperformed the conventional U-Net in segmentation accuracy. Specifically, while the conventional U-Net achieved a Dice coefficient of 82.59% and an IoU of 72.92% on the test dataset, the proposed model further improved performance, reaching an average Dice coefficient of 85.68%, IoU of 75.80%, and F1-score of 85.68%. The slightly lower IoU relative to the Dice coefficient may be attributed to the penalization of pixel-level outliers in the union-based evaluation, which tends to disproportionately impact cases with small lesion boundaries or heterogeneous textures.</p><p>When evaluated across fibrosis stages, the model demonstrated stable performance in the early stages, with Dice scores of 87.47% and 87.98% for F0 and F1, respectively, and corresponding IoU values close to 78%. A modest decline was observed in F2 with a Dice score of 84.11% and IoU of 73.85%, with the lowest performance in F3 where the Dice score was 80.54% and IoU was 70.49%. This likely reflects the increased heterogeneity and irregularity of fibrosis distribution in advanced disease. 
Interestingly, the model recovered performance in F4, achieving a Dice score of 84.00% and IoU of 73.14%, suggesting that once cirrhosis is established, the fibrotic patterns may become more homogeneous and therefore easier for the model to delineate.</p></sec><sec id="s4-2"><title>Comparison to Prior Work</title><p>MRE is a highly effective non-invasive tool for evaluating liver fibrosis and serves as a valuable alternative to liver biopsy. Clinicians typically assess regions of interest using elastograms with overlaid confidence maps generated following MRI scans [<xref ref-type="bibr" rid="ref31">31</xref>]. A previous study has investigated automated approaches for MRE measurement [<xref ref-type="bibr" rid="ref32">32</xref>]. An early method applied intensity membership functions combined with random walker segmentation to differentiate liver tissue from surrounding structures, achieving a correlation of 0.981 with manual measurements [<xref ref-type="bibr" rid="ref32">32</xref>]. Another study introduced volumetric segmentation using semi-automated proprietary software to evaluate liver stiffness. Their findings revealed significant differences between region of interest-based and volumetric analyses, suggesting that volumetric methods may provide better detection of heterogeneous fibrosis [<xref ref-type="bibr" rid="ref33">33</xref>]. More recently, a CNN-based framework was applied to MRE, reporting an intraclass correlation coefficient of 0.99 between automated and manual assessments in clinical patients [<xref ref-type="bibr" rid="ref19">19</xref>]. In this study, we applied the Unet-ResNet50 model to segment the liver on MRE confidence maps and measure liver stiffness. Our approach yielded a strong correlation of 0.943 with manual expert measurements, supporting its potential utility in clinical practice. To the best of our knowledge, this is the first study to implement a Unet-ResNet hybrid architecture for MRE segmentation and measurement. 
A previous study conducted in healthy volunteers reported excellent consistency and segmentation performance, with liver Dice scores reaching 0.95 [<xref ref-type="bibr" rid="ref34">34</xref>]. This study focused on automated liver fibrosis staging (F0&#x2013;F4) and extraction of MRE-derived stiffness values in kilopascals. Unlike studies limited to healthy subjects, our cohort consisted of clinical patients, in whom imaging data inherently exhibit greater heterogeneity.</p></sec><sec id="s4-3"><title>Limitations</title><p>This study has several limitations. Only gray-scale elastograms with 95% confidence maps were used for segmentation, which may have inadvertently included non-hepatic tissues such as the gallbladder fossa and large blood vessels. Despite this, the correlation between manual and automated methods remained strong. Further refinement of the segmentation process is needed to achieve more accurate anatomical delineation of the liver using the UNet-ResNet model. Key clinical data related to liver disease, such as iron levels, steatosis, and viral markers, were not collected. Severe steatosis and iron overload may interfere with MRE measurements and could not be adequately accounted for in this analysis. Histological fibrosis scores were unavailable for most participants, as liver biopsy was not routinely performed. The absence of biopsy confirmation limits the accuracy of staging, since MRE values alone may not fully capture the histopathological spectrum of fibrosis. This introduces a potential risk of mislabeling fibrosis severity, particularly in borderline cases or in patients with overlapping liver conditions. This study is subject to data imbalance, as the distribution of patients across different disease severities was uneven, which may affect the generalizability and stability of the proposed model. 
Although the dataset was partitioned to ensure fairness in training and testing, the uneven distribution of disease severity remains a potential source of bias. In particular, when the model is trained on patients with more advanced disease, its application to cohorts with milder disease may result in an overestimation of fibrosis severity and diminished sensitivity to early-stage changes. Conversely, if a model is optimized for mild cases, it may underperform in advanced disease populations, resulting in systematic misclassification. These imbalances highlight the need for future studies that incorporate paired biopsy and imaging data, along with balanced cohorts across different severities, to validate the robustness and clinical applicability of automated MRE-based staging.</p></sec><sec id="s4-4"><title>Future Directions</title><p>Future investigations will need to progress beyond algorithmic refinement and incorporate the expansion of patient cohorts, thereby increasing statistical power and enhancing the robustness of predictive models. Validation through multi-center studies will establish reproducibility across institutions, imaging protocols, and heterogeneous patient demographics. In parallel, the integration of multimodal information, including complementary imaging techniques, clinical parameters, and biomarker profiles, will strengthen both predictive accuracy and clinical relevance. The inclusion of expert annotations from multiple specialists, encompassing hepatologists, radiologists, pathologists, and other domain experts, will reduce inter-observer variability and further reinforce the clinical validity of model outputs. These efforts will enable broader generalizability across diverse populations and disease severities, ultimately supporting translation into routine clinical practice.</p></sec><sec id="s4-5"><title>Conclusions</title><p>Early and accurate assessment of liver fibrosis is essential for enabling timely diagnosis and intervention. 
In this study, we developed a UNet-ResNet50&#x2212;32 &#x00D7; 4d model to predict the severity of liver fibrosis. The model achieved correlation coefficients above 0.9 in both training and validation cohorts and reached a Dice score of 85.68%, demonstrating strong potential to support accurate fibrosis staging in clinical practice. Importantly, liver fibrosis staging currently lacks a universally accepted gold standard. While histology remains the traditional reference, it is invasive, limited by sampling error, and not always available. Our findings suggest that automated MRE-based methods may provide a reliable non-invasive alternative, although the absence of biopsy confirmation introduces potential uncertainties in staging accuracy.</p><p>The strong performance of our model highlights its practical value and potential to improve efficiency in clinical decision-making. Future research should validate these results in larger, multi-center cohorts, integrate complementary imaging modalities and clinical biomarkers to further strengthen predictive power, and explore real-time deployment within radiology workflows to maximize clinical applicability.</p></sec></sec></body><back><ack><p>The authors thank the Changhua Christian Hospital and all the authors of the original articles.</p><p>This research was funded by Changhua Christian Hospital (113-CCH-IRP-016).</p></ack><notes><sec><title>Data Availability</title><p>The datasets used and/or analyzed during the current study are available from the corresponding author on reasonable request.</p></sec></notes><fn-group><fn fn-type="con"><p>PYS and JLX wrote the main manuscript text and HJS helped with data collection and data annotation. 
All authors reviewed the manuscript.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">CNN</term><def><p>convolutional neural network</p></def></def-item><def-item><term id="abb3">CT</term><def><p>computed tomography</p></def></def-item><def-item><term id="abb4">DICOM</term><def><p>Digital Imaging and Communications in Medicine</p></def></def-item><def-item><term id="abb5">IoU</term><def><p>intersection over union</p></def></def-item><def-item><term id="abb6">MRE</term><def><p>magnetic resonance elastography</p></def></def-item><def-item><term id="abb7">MRI</term><def><p>magnetic resonance imaging</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Angeli</surname><given-names>P</given-names> </name><name name-style="western"><surname>Bernardi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Villanueva</surname><given-names>C</given-names> </name><etal/></person-group><article-title>EASL clinical practice guidelines for the management of patients with decompensated cirrhosis</article-title><source>J Hepatol</source><year>2018</year><month>08</month><volume>69</volume><issue>2</issue><fpage>406</fpage><lpage>460</lpage><pub-id pub-id-type="doi">10.1016/j.jhep.2018.03.024</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Suk</surname><given-names>KT</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>DJ</given-names> </name></person-group><article-title>Staging of liver fibrosis or cirrhosis: the role of hepatic venous pressure gradient 
measurement</article-title><source>World J Hepatol</source><year>2015</year><month>03</month><day>27</day><volume>7</volume><issue>3</issue><fpage>607</fpage><lpage>615</lpage><pub-id pub-id-type="doi">10.4254/wjh.v7.i3.607</pub-id><pub-id pub-id-type="medline">25848485</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Guix&#x00E9;-Muntet</surname><given-names>S</given-names> </name><name name-style="western"><surname>Quesada-V&#x00E1;zquez</surname><given-names>S</given-names> </name><name name-style="western"><surname>Gracia-Sancho</surname><given-names>J</given-names> </name></person-group><article-title>Pathophysiology and therapeutic options for cirrhotic portal hypertension</article-title><source>The Lancet Gastroenterology &#x0026; Hepatology</source><year>2024</year><month>07</month><volume>9</volume><issue>7</issue><fpage>646</fpage><lpage>663</lpage><pub-id pub-id-type="doi">10.1016/S2468-1253(23)00438-7</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Berzigotti</surname><given-names>A</given-names> </name><name name-style="western"><surname>Tsochatzis</surname><given-names>E</given-names> </name><name name-style="western"><surname>Boursier</surname><given-names>J</given-names> </name><etal/></person-group><article-title>EASL clinical practice guidelines on non-invasive tests for evaluation of liver disease severity and prognosis &#x2013; 2021 update</article-title><source>J Hepatol</source><year>2021</year><month>09</month><volume>75</volume><issue>3</issue><fpage>659</fpage><lpage>689</lpage><pub-id pub-id-type="doi">10.1016/j.jhep.2021.05.025</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Li</surname><given-names>S</given-names> </name><name name-style="western"><surname>Sun</surname><given-names>X</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Liver fibrosis conventional and molecular imaging diagnosis update</article-title><source>J Liver</source><year>2019</year><volume>8</volume><issue>1</issue><fpage>236</fpage><pub-id pub-id-type="medline">31341723</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ozkaya</surname><given-names>E</given-names> </name><name name-style="western"><surname>Kennedy</surname><given-names>P</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Precision and test-retest repeatability of stiffness measurement with MR elastography: a multicenter phantom study</article-title><source>Radiology</source><year>2024</year><month>05</month><volume>311</volume><issue>2</issue><fpage>e233136</fpage><pub-id pub-id-type="doi">10.1148/radiol.233136</pub-id><pub-id pub-id-type="medline">38742971</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hoodeshenas</surname><given-names>S</given-names> </name><name name-style="western"><surname>Yin</surname><given-names>M</given-names> </name><name name-style="western"><surname>Venkatesh</surname><given-names>SK</given-names> </name></person-group><article-title>Magnetic resonance elastography of liver: current update</article-title><source>Top Magn Reson Imaging</source><year>2018</year><month>10</month><volume>27</volume><issue>5</issue><fpage>319</fpage><lpage>333</lpage><pub-id 
pub-id-type="doi">10.1097/RMR.0000000000000177</pub-id><pub-id pub-id-type="medline">30289828</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>J</given-names> </name><name name-style="western"><surname>Xi</surname><given-names>J</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>T</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>L</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>K</given-names> </name><name name-style="western"><surname>Ding</surname><given-names>X</given-names> </name></person-group><article-title>Diagnostic performance of computed tomography-based artificial intelligence for early recurrence of cholangiocarcinoma: systematic review and meta-analysis</article-title><source>J Med Internet Res</source><year>2025</year><month>09</month><day>18</day><volume>27</volume><fpage>e78306</fpage><pub-id pub-id-type="doi">10.2196/78306</pub-id><pub-id pub-id-type="medline">40905766</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lin</surname><given-names>PC</given-names> </name><name name-style="western"><surname>Li</surname><given-names>TC</given-names> </name><name name-style="western"><surname>Huang</surname><given-names>TH</given-names> </name><etal/></person-group><article-title>Machine learning for diabetic foot care: accuracy trends and emerging directions in healthcare AI</article-title><source>Front Public Health</source><year>2025</year><volume>13</volume><fpage>1613946</fpage><pub-id pub-id-type="doi">10.3389/fpubh.2025.1613946</pub-id><pub-id pub-id-type="medline">40756392</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yin</surname><given-names>HC</given-names> </name><name name-style="western"><surname>Lei</surname><given-names>RL</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>JL</given-names> </name><name name-style="western"><surname>Lin</surname><given-names>CM</given-names> </name><name name-style="western"><surname>Hsu</surname><given-names>YL</given-names> </name></person-group><article-title>Enhancing stroke prognosis prediction using deep convolution neural networks</article-title><source>J Mech Med Biol</source><year>2025</year><month>06</month><volume>25</volume><issue>05</issue><pub-id pub-id-type="doi">10.1142/S0219519425400391</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Huang</surname><given-names>KY</given-names> </name><name name-style="western"><surname>Chung</surname><given-names>CL</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>JL</given-names> </name></person-group><article-title>Deep learning object detection-based early detection of lung cancer</article-title><source>Front Med (Lausanne)</source><year>2025</year><volume>12</volume><fpage>1567119</fpage><pub-id pub-id-type="doi">10.3389/fmed.2025.1567119</pub-id><pub-id pub-id-type="medline">40357272</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Yin</surname><given-names>X</given-names> </name><name name-style="western"><surname>Lai</surname><given-names>J</given-names> </name><name name-style="western"><surname>Luo</surname><given-names>K</given-names> </name><name 
name-style="western"><surname>Wu</surname><given-names>D</given-names> </name></person-group><article-title>Fusion of X-ray images and clinical data for a multimodal deep learning prediction model of osteoporosis: algorithm development and validation study</article-title><source>JMIR Med Inform</source><year>2025</year><month>09</month><day>18</day><volume>13</volume><fpage>e70738</fpage><pub-id pub-id-type="doi">10.2196/70738</pub-id><pub-id pub-id-type="medline">40966528</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Vural</surname><given-names>O</given-names> </name><name name-style="western"><surname>Ozaydin</surname><given-names>B</given-names> </name><name name-style="western"><surname>Aram</surname><given-names>KY</given-names> </name><name name-style="western"><surname>Booth</surname><given-names>J</given-names> </name><name name-style="western"><surname>Lindsey</surname><given-names>BF</given-names> </name><name name-style="western"><surname>Ahmed</surname><given-names>A</given-names> </name></person-group><article-title>An artificial intelligence-based framework for predicting emergency department overcrowding: development and evaluation study</article-title><source>JMIR Med Inform</source><year>2025</year><month>09</month><day>17</day><volume>13</volume><fpage>e73960</fpage><pub-id pub-id-type="doi">10.2196/73960</pub-id><pub-id pub-id-type="medline">40961493</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lin</surname><given-names>X</given-names> </name><name name-style="western"><surname>Liang</surname><given-names>C</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>J</given-names> </name><name name-style="western"><surname>Lyu</surname><given-names>T</given-names> 
</name><name name-style="western"><surname>Ghumman</surname><given-names>N</given-names> </name><name name-style="western"><surname>Campbell</surname><given-names>B</given-names> </name></person-group><article-title>Artificial intelligence-augmented clinical decision support systems for pregnancy care: systematic review</article-title><source>J Med Internet Res</source><year>2024</year><month>09</month><day>16</day><volume>26</volume><fpage>e54737</fpage><pub-id pub-id-type="doi">10.2196/54737</pub-id><pub-id pub-id-type="medline">39283665</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lakshmipriya</surname><given-names>B</given-names> </name><name name-style="western"><surname>Pottakkat</surname><given-names>B</given-names> </name><name name-style="western"><surname>Ramkumar</surname><given-names>G</given-names> </name></person-group><article-title>Deep learning techniques in liver tumour diagnosis using CT and MR imaging - A systematic review</article-title><source>Artif Intell Med</source><year>2023</year><month>07</month><volume>141</volume><fpage>102557</fpage><pub-id pub-id-type="doi">10.1016/j.artmed.2023.102557</pub-id><pub-id pub-id-type="medline">37295904</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Dou</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>H</given-names> </name><name name-style="western"><surname>Jin</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Yu</surname><given-names>L</given-names> </name><name name-style="western"><surname>Qin</surname><given-names>J</given-names> </name><name name-style="western"><surname>Heng</surname><given-names>PA</given-names> 
</name></person-group><article-title>3D deeply supervised network for automatic liver segmentation from CT volumes</article-title><year>2016</year><month>10</month><conf-name>International conference on medical image computing and computer-assisted intervention</conf-name><fpage>149</fpage><lpage>157</lpage><pub-id pub-id-type="doi">10.1007/978-3-319-46723-8_18</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Vianna</surname><given-names>P</given-names> </name><name name-style="western"><surname>Kulbay</surname><given-names>M</given-names> </name><name name-style="western"><surname>Boustros</surname><given-names>P</given-names> </name><etal/></person-group><article-title>Automated liver segmentation and steatosis grading using deep learning on b-mode ultrasound images</article-title><year>2023</year><month>09</month><conf-name>2023 IEEE International Ultrasonics Symposium (IUS)</conf-name><conf-loc>Montreal, QC, Canada</conf-loc><fpage>1</fpage><lpage>4</lpage><pub-id pub-id-type="doi">10.1109/IUS51837.2023.10307501</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ozkaya</surname><given-names>E</given-names> </name><name name-style="western"><surname>Nieves-Vazquez</surname><given-names>HA</given-names> </name><name name-style="western"><surname>Yuce</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Automated liver magnetic resonance elastography quality control and liver stiffness measurement using deep learning</article-title><source>Abdom Radiol (NY)</source><year>2025</year><month>09</month><volume>50</volume><issue>9</issue><fpage>4100</fpage><lpage>4109</lpage><pub-id pub-id-type="doi">10.1007/s00261-025-04883-2</pub-id><pub-id 
pub-id-type="medline">40088296</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cunha</surname><given-names>GM</given-names> </name><name name-style="western"><surname>Delgado</surname><given-names>TI</given-names> </name><name name-style="western"><surname>Middleton</surname><given-names>MS</given-names> </name><etal/></person-group><article-title>Automated CNN-based analysis versus manual analysis for MR elastography in nonalcoholic fatty liver disease: intermethod agreement and fibrosis stage discriminative performance</article-title><source>AJR Am J Roentgenol</source><year>2022</year><month>08</month><volume>219</volume><issue>2</issue><fpage>224</fpage><lpage>232</lpage><pub-id pub-id-type="doi">10.2214/AJR.21.27135</pub-id><pub-id pub-id-type="medline">35107306</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ansari</surname><given-names>MY</given-names> </name><name name-style="western"><surname>Abdalla</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ansari</surname><given-names>MY</given-names> </name><etal/></person-group><article-title>Practical utility of liver segmentation methods in clinical surgeries and interventions</article-title><source>BMC Med Imaging</source><year>2022</year><month>05</month><day>24</day><volume>22</volume><issue>1</issue><fpage>97</fpage><pub-id pub-id-type="doi">10.1186/s12880-022-00825-2</pub-id><pub-id pub-id-type="medline">35610600</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>R</given-names> </name><name name-style="western"><surname>Zhou</surname><given-names>H</given-names> 
</name><name name-style="western"><surname>Fu</surname><given-names>P</given-names> </name><name name-style="western"><surname>Shen</surname><given-names>H</given-names> </name><name name-style="western"><surname>Bai</surname><given-names>Y</given-names> </name></person-group><article-title>A multiscale attentional unet model for automatic segmentation in medical ultrasound images</article-title><source>Ultrason Imaging</source><year>2023</year><month>07</month><volume>45</volume><issue>4</issue><fpage>159</fpage><lpage>174</lpage><pub-id pub-id-type="doi">10.1177/01617346231169789</pub-id><pub-id pub-id-type="medline">37114669</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bougourzi</surname><given-names>F</given-names> </name><name name-style="western"><surname>Distante</surname><given-names>C</given-names> </name><name name-style="western"><surname>Dornaika</surname><given-names>F</given-names> </name><name name-style="western"><surname>Taleb-Ahmed</surname><given-names>A</given-names> </name></person-group><article-title>PDAtt-Unet: pyramid dual-decoder attention Unet for Covid-19 infection segmentation from CT-scans</article-title><source>Med Image Anal</source><year>2023</year><month>05</month><volume>86</volume><fpage>102797</fpage><pub-id pub-id-type="doi">10.1016/j.media.2023.102797</pub-id><pub-id pub-id-type="medline">36966605</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Peng</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Jing</surname><given-names>S</given-names> </name><name name-style="western"><surname>Han</surname><given-names>L</given-names> </name><name 
name-style="western"><surname>Li</surname><given-names>T</given-names> </name><name name-style="western"><surname>Luo</surname><given-names>J</given-names> </name></person-group><article-title>A deep-learning approach for segmentation of liver tumors in magnetic resonance imaging using UNet++</article-title><source>BMC Cancer</source><year>2023</year><volume>23</volume><issue>1</issue><fpage>1060</fpage><pub-id pub-id-type="doi">10.1186/s12885-023-11432-x</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>H</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>D</given-names> </name><name name-style="western"><surname>Ding</surname><given-names>S</given-names> </name><name name-style="western"><surname>Gao</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Feng</surname><given-names>J</given-names> </name><name name-style="western"><surname>Wan</surname><given-names>S</given-names> </name></person-group><article-title>Rib segmentation algorithm for X-ray image based on unpaired sample augmentation and multi-scale network</article-title><source>Neural Comput &#x0026; Applic</source><year>2023</year><month>06</month><volume>35</volume><issue>16</issue><fpage>11583</fpage><lpage>11597</lpage><pub-id pub-id-type="doi">10.1007/s00521-021-06546-x</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ansari</surname><given-names>MY</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Meher</surname><given-names>PK</given-names> </name><name name-style="western"><surname>Dakua</surname><given-names>SP</given-names> 
</name></person-group><article-title>Dense-PSP-UNet: A neural network for fast inference liver ultrasound segmentation</article-title><source>Comput Biol Med</source><year>2023</year><month>02</month><volume>153</volume><fpage>106478</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.106478</pub-id><pub-id pub-id-type="medline">36603437</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>J</given-names> </name><name name-style="western"><surname>Niu</surname><given-names>Y</given-names> </name></person-group><article-title>Dual encoding DDS&#x2010;UNet liver tumour segmentation based on multi&#x2010;scale deep and shallow feature fusion</article-title><source>IET Image Process</source><year>2024</year><month>04</month><volume>18</volume><issue>5</issue><fpage>1189</fpage><lpage>1199</lpage><pub-id pub-id-type="doi">10.1049/ipr2.13018</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gross</surname><given-names>M</given-names> </name><name name-style="western"><surname>Huber</surname><given-names>S</given-names> </name><name name-style="western"><surname>Arora</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Automated MRI liver segmentation for anatomical segmentation, liver volumetry, and the extraction of radiomics</article-title><source>Eur Radiol</source><year>2024</year><month>08</month><volume>34</volume><issue>8</issue><fpage>5056</fpage><lpage>5065</lpage><pub-id pub-id-type="doi">10.1007/s00330-023-10495-5</pub-id><pub-id pub-id-type="medline">38217704</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Hsu</surname><given-names>PY</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>JL</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>LL</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>MY</given-names> </name></person-group><article-title>Using deep learning to construct microcalcification clusters in a mammography prediction model</article-title><source>Innov Emerg Technol</source><year>2025</year><month>01</month><volume>12</volume><pub-id pub-id-type="doi">10.1142/S2737599425500306</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Guglielmo</surname><given-names>FF</given-names> </name><name name-style="western"><surname>Venkatesh</surname><given-names>SK</given-names> </name><name name-style="western"><surname>Mitchell</surname><given-names>DG</given-names> </name></person-group><article-title>Liver MR elastography technique and image interpretation: pearls and pitfalls</article-title><source>Radiographics</source><year>2019</year><volume>39</volume><issue>7</issue><fpage>1983</fpage><lpage>2002</lpage><pub-id pub-id-type="doi">10.1148/rg.2019190034</pub-id><pub-id pub-id-type="medline">31626569</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wu</surname><given-names>WP</given-names> </name><name name-style="western"><surname>Chou</surname><given-names>CT</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>RC</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>CW</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>KW</given-names> </name><name 
name-style="western"><surname>Wu</surname><given-names>HK</given-names> </name></person-group><article-title>Non-invasive evaluation of hepatic fibrosis: the diagnostic performance of magnetic resonance elastography in patients with viral hepatitis B or C</article-title><source>PLoS One</source><year>2015</year><volume>10</volume><issue>10</issue><fpage>e0140068</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0140068</pub-id><pub-id pub-id-type="medline">26469342</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zerunian</surname><given-names>M</given-names> </name><name name-style="western"><surname>Masci</surname><given-names>B</given-names> </name><name name-style="western"><surname>Caruso</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Liver magnetic resonance elastography: focus on methodology, technique, and feasibility</article-title><source>Diagnostics (Basel)</source><year>2024</year><month>02</month><day>9</day><volume>14</volume><issue>4</issue><fpage>379</fpage><pub-id pub-id-type="doi">10.3390/diagnostics14040379</pub-id><pub-id pub-id-type="medline">38396418</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dzyubak</surname><given-names>B</given-names> </name><name name-style="western"><surname>Venkatesh</surname><given-names>SK</given-names> </name><name name-style="western"><surname>Manduca</surname><given-names>A</given-names> </name><name name-style="western"><surname>Glaser</surname><given-names>KJ</given-names> </name><name name-style="western"><surname>Ehman</surname><given-names>RL</given-names> </name></person-group><article-title>Automated liver elasticity calculation for MR elastography</article-title><source>J Magn Reson 
Imaging</source><year>2016</year><month>05</month><volume>43</volume><issue>5</issue><fpage>1055</fpage><lpage>1063</lpage><pub-id pub-id-type="doi">10.1002/jmri.25072</pub-id><pub-id pub-id-type="medline">26494224</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rezvani Habibabadi</surname><given-names>R</given-names> </name><name name-style="western"><surname>Khoshpouri</surname><given-names>P</given-names> </name><name name-style="western"><surname>Ghadimi</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Comparison between ROI-based and volumetric measurements in quantifying heterogeneity of liver stiffness using MR elastography</article-title><source>Eur Radiol</source><year>2020</year><month>03</month><volume>30</volume><issue>3</issue><fpage>1609</fpage><lpage>1615</lpage><pub-id pub-id-type="doi">10.1007/s00330-019-06478-0</pub-id><pub-id pub-id-type="medline">31705257</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jaitner</surname><given-names>N</given-names> </name><name name-style="western"><surname>Ludwig</surname><given-names>J</given-names> </name><name name-style="western"><surname>Meyer</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Automated liver and spleen segmentation for MR elastography maps using U-Nets</article-title><source>Sci Rep</source><year>2025</year><month>03</month><day>28</day><volume>15</volume><issue>1</issue><fpage>10762</fpage><pub-id pub-id-type="doi">10.1038/s41598-025-95157-w</pub-id><pub-id pub-id-type="medline">40155744</pub-id></nlm-citation></ref></ref-list></back></article>