<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Med Inform</journal-id><journal-id journal-id-type="publisher-id">medinform</journal-id><journal-id journal-id-type="index">7</journal-id><journal-title>JMIR Medical Informatics</journal-title><abbrev-journal-title>JMIR Med Inform</abbrev-journal-title><issn pub-type="epub">2291-9694</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v14i1e72679</article-id><article-id pub-id-type="doi">10.2196/72679</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Automated Classification of Lymphoma Subtypes From Histopathological Images Using a U-Net Deep Learning Model: Comparative Evaluation Study</article-title></title-group><contrib-group><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Zhao</surname><given-names>Jin</given-names></name><degrees>MM</degrees><xref ref-type="aff" rid="aff1"/><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Wen</surname><given-names>Xiaolian</given-names></name><degrees>MM</degrees><xref ref-type="aff" rid="aff1"/><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Ma</surname><given-names>Li</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author" corresp="yes"><name 
name-style="western"><surname>Su</surname><given-names>Liping</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1"/></contrib></contrib-group><aff id="aff1"><institution>Department of Hematology, Cancer Hospital Affiliated to Shanxi Medical University, Shanxi Province Cancer Hospital, Shanxi Hospital Affiliated to Cancer Hospital, Chinese Academy of Medical Sciences</institution><addr-line>No. 3, Zhigong New Street</addr-line><addr-line>Taiyuan</addr-line><country>China</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Benis</surname><given-names>Arriel</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Pitsun</surname><given-names>Oleh</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Vasavi</surname><given-names>S</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Liping Su, MD, Department of Hematology, Cancer Hospital Affiliated to Shanxi Medical University, Shanxi Province Cancer Hospital, Shanxi Hospital Affiliated to Cancer Hospital, Chinese Academy of Medical Sciences, No. 3, Zhigong New Street, Taiyuan, 030013, China, 86 0351-4650984; <email>sxsulp2005@163.com</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>these authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>6</day><month>1</month><year>2026</year></pub-date><volume>14</volume><elocation-id>e72679</elocation-id><history><date date-type="received"><day>15</day><month>02</month><year>2025</year></date><date date-type="rev-recd"><day>11</day><month>10</month><year>2025</year></date><date date-type="accepted"><day>13</day><month>10</month><year>2025</year></date></history><copyright-statement>&#x00A9; Jin Zhao, Xiaolian Wen, Li Ma, Liping Su. 
Originally published in JMIR Medical Informatics (<ext-link ext-link-type="uri" xlink:href="https://medinform.jmir.org">https://medinform.jmir.org</ext-link>), 6.1.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Informatics, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://medinform.jmir.org/">https://medinform.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://medinform.jmir.org/2026/1/e72679"/><abstract><sec><title>Background</title><p>Accurate classification and grading of lymphoma subtypes are essential for treatment planning. Traditional diagnostic methods face challenges of subjectivity and inefficiency, highlighting the need for automated solutions based on deep learning techniques.</p></sec><sec><title>Objective</title><p>This study aimed to investigate the application of deep learning technology, specifically the U-Net model, in classifying and grading lymphoma subtypes to enhance diagnostic precision and efficiency.</p></sec><sec sec-type="methods"><title>Methods</title><p>In this study, the U-Net model was used as the primary tool for image segmentation integrated with attention mechanisms and residual networks for feature extraction and classification. 
A total of 620 high-quality histopathological images representing 3 major lymphoma subtypes were collected from The Cancer Genome Atlas and the Cancer Imaging Archive. All images underwent standardized preprocessing, including Gaussian filtering for noise reduction, histogram equalization, and normalization. Data augmentation techniques such as rotation, flipping, and scaling were applied to improve the model&#x2019;s generalization capability. The dataset was divided into training (70%), validation (15%), and test (15%) subsets. Five-fold cross-validation was used to assess model robustness. Performance was benchmarked against mainstream convolutional neural network architectures, including fully convolutional network, SegNet, and DeepLabv3+.</p></sec><sec sec-type="results"><title>Results</title><p>The U-Net model achieved high segmentation accuracy, effectively delineating lesion regions and improving the quality of input for classification and grading. The incorporation of attention mechanisms further improved the model&#x2019;s ability to extract key features, whereas the residual structure of the residual network enhanced classification accuracy for complex images. In the test set (N=1250), the proposed fusion model achieved an accuracy of 92% (1150/1250), a sensitivity of 91.04% (1138/1250), a specificity of 89.04% (1113/1250), and an <italic>F</italic><sub>1</sub>-score of 90% (1125/1250) for the classification of the 3 lymphoma subtypes, with an area under the receiver operating characteristic curve of 0.95 (95% CI 0.93&#x2010;0.97). The high sensitivity and specificity of the model indicate strong clinical applicability, particularly as an assistive diagnostic tool.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Deep learning techniques based on the U-Net architecture offer considerable advantages in the automated classification and grading of lymphoma subtypes. 
The proposed model significantly improved diagnostic accuracy and accelerated pathological evaluation, providing efficient and precise support for clinical decision-making. Future work may focus on enhancing model robustness through integration with advanced algorithms and validating performance across multicenter clinical datasets. The model also holds promise for deployment in digital pathology platforms and artificial intelligence&#x2013;assisted diagnostic workflows, improving screening efficiency and promoting consistency in pathological classification.</p></sec></abstract><kwd-group><kwd>deep learning</kwd><kwd>lymphoma</kwd><kwd>U-Net</kwd><kwd>pathological subtype</kwd><kwd>automated diagnosis</kwd><kwd>medical image analysis</kwd><kwd>artificial intelligence</kwd><kwd>AI</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Lymphoma is a malignant tumor originating in the lymphatic system, with a steadily increasing global incidence. On the basis of its clinical features and histological characteristics, lymphoma can be classified into various subtypes, with Hodgkin lymphoma and non-Hodgkin lymphoma being the most prevalent [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. Accurate classification and grading of lymphoma are critical for clinical treatment and prognostic evaluation [<xref ref-type="bibr" rid="ref3">3</xref>]. Conventional diagnostic approaches rely on the visual examination of tissue sections by pathologists under a microscope, a process that is labor-intensive and influenced by subjective judgment. Diagnostic accuracy is often limited by the pathologist&#x2019;s expertise and technical proficiency and by sample quality [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref5">5</xref>]. 
With the advancements in medical imaging and computational technology, artificial intelligence (AI) techniques such as deep learning have been gradually introduced into medical image analysis [<xref ref-type="bibr" rid="ref6">6</xref>], showing great potential in automated image segmentation, feature extraction, and disease diagnosis [<xref ref-type="bibr" rid="ref7">7</xref>].</p><p>Deep learning, particularly convolutional neural networks (CNNs), has achieved significant success in medical image analysis, offering new approaches for automated lymphoma diagnosis [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. Among them, the U-Net architecture, a specialized form of convolutional neural network, has demonstrated notable effectiveness in segmentation tasks by enabling precise delineation of lesion areas and producing high-quality inputs for subsequent classification and grading [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref10">10</xref>]. Despite these advancements, several challenges persist in the automated analysis of lymphoma histopathology images [<xref ref-type="bibr" rid="ref11">11</xref>]. Minimal morphological differences between subtypes and the inherent complexity of lesion regions increase classification difficulty [<xref ref-type="bibr" rid="ref12">12</xref>]. Moreover, the performance of existing deep learning models remains highly dependent on large volumes of annotated data. Limited datasets and overfitting remain a pressing challenge [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. 
Therefore, developing a deep learning model that can effectively improve the accuracy of lymphoma pathological image classification and grading has become a critical research direction in medical image analysis [<xref ref-type="bibr" rid="ref15">15</xref>].</p><p>Recent studies have shown that pathological diagnosis of lymphoma is associated with considerable interobserver variability, with consistency coefficients (&#x03BA; values) ranging between 0.55 and 0.70. Misclassification frequently occurs among morphologically similar subtypes, including follicular lymphoma (FL) and mantle cell lymphoma (MCL). Previous research has applied CNNs and other machine learning models for automatic classification of pathological images, such as Inception-V3&#x2013;based models for breast cancer image analysis or residual network (ResNet)&#x2013;based models for predicting lung cancer types and molecular features. These studies highlight the substantial potential of AI in assisting pathological diagnosis. However, research specifically focused on the classification of lymphoma pathological subtypes remains limited and often restricted to individual subtypes or single-model architectures. There is still a lack of systematic validation of hybrid deep learning models for fine-grained classification across multiple lymphoma subtypes.</p><p>This study aimed to achieve automated segmentation, subtype classification, and grading of lymphoma pathological images by integrating deep learning techniques, including the U-Net model, attention mechanisms, and residual networks (ResNet), all of which are established deep learning approaches [<xref ref-type="bibr" rid="ref4">4</xref>]. This research has significant clinical and practical implications. 
Deep learning models have the potential to significantly improve the accuracy of lymphoma pathological diagnosis, reduce human-related diagnostic errors, and provide more objective and consistent diagnostic results [<xref ref-type="bibr" rid="ref16">16</xref>]. Automated systems can also process large volumes of pathological slides at high speed, offering an efficient auxiliary tool for pathologists, accelerating the lymphoma diagnostic workflow, and reducing patient waiting times. Moreover, precise identification and grading of lymphoma subtypes may contribute to the development of personalized treatment strategies, allowing for more tailored treatment plans and improving treatment outcomes and survival rates [<xref ref-type="bibr" rid="ref17">17</xref>].</p><p>The primary objective of this study was to explore and evaluate the application of deep learning techniques in classifying and grading lymphoma pathological subtypes. An automated image analysis system was constructed based on the U-Net architecture. Through deep learning methods such as image segmentation, feature extraction, and classification prediction, this study sought to accurately identify different lymphoma subtypes and achieve effective grading. In addition, this study used cross-validation techniques to assess the stability and accuracy of the proposed model. A comparative analysis with traditional pathological diagnostic methods was conducted to verify the model&#x2019;s clinical feasibility. Ultimately, this study aimed to provide effective technical support for the early diagnosis of lymphoma, advancing the application and development of AI into the field of medical pathology.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Data Collection and Preprocessing</title><p>A total of 620 high-quality histopathological images representing 3 major lymphoma subtypes were collected from The Cancer Genome Atlas (TCGA) and The Cancer Imaging Archive (TCIA). 
The number of samples per subtype was approximately balanced. Inclusion criteria required complete pathological annotations and clearly defined tissue structures. Images exhibiting severe artifacts, incomplete labeling, or low resolution were excluded. All slides were stained with hematoxylin and eosin and resized to 512 &#x00D7; 512 pixels to ensure model compatibility. No stain normalization was applied.</p><p>The image data in this study primarily consisted of standardized lymphoma tissue section images captured using high-resolution digital scanners and stored in TIFF or PNG format. Image preprocessing steps included denoising, contrast enhancement, and image normalization. Gaussian filtering was used to reduce noise, whereas histogram equalization was used to enhance contrast and improve the visibility of lesion regions. All images were uniformly cropped and resized to ensure consistent input data quality for the network. These preprocessing procedures were implemented to improve data quality and model classification accuracy (Figure S1 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>).</p></sec><sec id="s2-2"><title>Feature Extraction From Image Data</title><p>This study used the U-Net architecture to conduct image segmentation, enabling precise identification of lesion areas and the generation of high-quality segmentation maps. Subsequently, ResNet was used for feature extraction and classification. The residual learning structure in ResNet enabled the capture of deep pathological features from the segmented images, thereby improving classification accuracy and supporting automated grading of lymphoma subtypes. During training, 5-fold cross-validation was used to optimize model hyperparameters and reduce the risk of overfitting. 
The Adam optimization algorithm was used in conjunction with a learning rate decay strategy to enhance training stability and convergence (Figure S2 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). The dataset was randomly divided into 3 subsets, with 70% allocated to the training set and 15% each allocated to the validation and test sets.</p></sec><sec id="s2-3"><title>Model Construction and Training</title><p>The model used in this study was based on the standard U-Net architecture, enhanced by integrating ResNet residual modules and attention mechanisms. To achieve subtype classification and grading of lymphoma, the combined use of U-Net and ResNet allowed for effective image segmentation and deep feature extraction. The cross-entropy loss function was used, and the Adam optimizer with an initial learning rate of 0.001 was adopted to ensure stable model convergence. A learning rate decay mechanism was applied to progressively adjust the learning rate, improving model convergence and training stability (Figure S3 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). The model can automatically extract and learn features of different lymphoma subtypes and grades from images through this training process, enabling efficient classification and accurate grading of new images.</p><p>To further justify the selected model architecture, a comparative evaluation was conducted against several classic CNN frameworks widely used in medical image analysis (Table S1 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). Fully convolutional networks offered advantages in simplicity and computational efficiency but demonstrated limited sensitivity in detecting small lesions and boundary structures in pathological images. SegNet achieved reliable semantic segmentation performance but often resulted in detail loss during the upsampling stage due to its deconvolutional design. 
DeepLabv3+, which uses atrous convolution and multiscale feature fusion, significantly improved segmentation accuracy but introduced increased model complexity, computational cost, and reduced interpretability in clinical settings. In contrast, the hybrid deep learning framework proposed in this study&#x2014;featuring a U-Net backbone, ResNet residual connections, and attention mechanisms&#x2014;achieved a better balance among segmentation accuracy, training stability, and interpretability. It is particularly well-suited for the classification and grading of lymphoma images characterized by blurry boundaries and high subtype heterogeneity.</p></sec><sec id="s2-4"><title>Model Evaluation and Optimization</title><p>Model performance was evaluated using multiple metrics, including accuracy, recall, precision, and <italic>F</italic><sub>1</sub>-score, to assess classification effectiveness. The receiver operating characteristic curve and the area under the curve (AUC) were used to further evaluate the model&#x2019;s discriminative ability. To prevent overfitting and enhance generalization capability, the model incorporated dropout layers and data augmentation techniques. Augmentation strategies included image rotation, translation, and flipping, which increased the diversity of the training data and improved model robustness. During training, the performance on the validation set was monitored in real time to identify and preserve the model with the best generalization ability for subsequent testing (Figure S4 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>).</p></sec><sec id="s2-5"><title>Traditional Pathology Baseline and Metric Sources</title><p>To establish a reproducible baseline for comparison with traditional pathological diagnosis, we conducted a randomized crossover washout reading study. Six board-certified pathologists with &#x2265;5 years of professional experience were enrolled. 
From the 620 cases included in this study, a subset of 180 (29%) was stratified by subtype, center, and diagnostic difficulty to form the reading set. The reference standard (ground truth) was determined by 2 senior pathologists independently, with any discrepancies resolved by a third expert adjudicator. Each reader completed case interpretation under 2 study arms: unassisted and AI-assisted (with U-Net&#x2013;generated segmentation masks and posterior probability and attention cues displayed). The order of arms and case sequence was computer randomized, with a washout period of at least 2 weeks, and the readers were blinded to previous assessments. The primary endpoint was case-level accuracy. Secondary endpoints included sensitivity, specificity, <italic>F</italic><sub>1</sub>-score, AUC, mean average precision (mAP), weighted &#x03BA;, and single-case reading time. Segmentation performance was evaluated using case-level dice coefficient and intersection over union (macro- and microaveraged). Classification and grading were assessed at the whole-slide image level; patch-level probabilities were aggregated via attention pooling, and classification thresholds were determined on the validation set using the Youden index and subsequently fixed for the test set. For statistical analyses, the DeLong test was used to compare AUCs, whereas the McNemar test was applied to paired proportions (accuracy, sensitivity, and specificity). The 95% CIs were estimated via the bias-corrected and accelerated bootstrap method (1000 iterations), and Holm-Bonferroni correction was used for multiple comparisons.</p></sec><sec id="s2-6"><title>Image Data Analysis Methods</title><p>Image data analysis focused on 2 primary tasks: image classification and subtype grading. First, deep CNNs based on U-Net and ResNet were used to analyze histopathological slide images. 
These architectures effectively captured cellular morphology and tissue-level structural features, enabling accurate differentiation among lymphoma subtypes. For subtype grading, the analysis extended beyond subtype identification to include the evaluation of lymphocyte distribution, morphological variation, and lesion heterogeneity within the image (Figure S5 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). To ensure accuracy in classification and grading, the image analysis methods integrated traditional medical imaging analysis techniques with deep learning approaches using automated and manual annotation strategies.</p></sec><sec id="s2-7"><title>Statistical Analysis and Result Validation</title><p>Following model construction, statistical methods were used to validate its performance comprehensively. In addition to conventional classification evaluation metrics, a confusion matrix was used to analyze the model&#x2019;s prediction results in detail, identifying errors such as false positives and false negatives. The average accuracy of the model was calculated through multiple repeated experiments, and k-fold cross-validation was used to ensure the robustness of the results. Furthermore, 2-tailed <italic>t</italic> tests or ANOVA were conducted to analyze differences among models, ensuring that the selected model demonstrated superior performance in lymphoma subtype classification and grading tasks (Figure S6 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>).</p></sec><sec id="s2-8"><title>Feature Analysis and Subtype Differentiation</title><p>Feature analysis was conducted using the U-Net and ResNet architectures to extract and differentiate cellular and tissue-level characteristics from histopathological images. 
Key features, including tumor cell morphology, tissue architecture, and cellular distribution, were effectively captured to support the accurate identification of lymphoma subtypes and enhance understanding of histological variation among them.</p><p>U-Net provided precise segmentation of lesion regions, whereas ResNet enabled efficient feature extraction and supported deeper network training. The combination of these 2 architectures facilitated the recognition of complex spatial relationships between cells and tissues, contributing to improved classification accuracy.</p><p>In addition, statistical analysis methods are crucial in evaluating model performance. Metrics such as sensitivity, specificity, precision, and recall were calculated to comprehensively assess the performance of different models in classifying lymphoma pathological subtypes. Confusion matrices and receiver operating characteristic curve analyses were also used to evaluate each model&#x2019;s predictive effectiveness, providing deeper insights into the strengths and limitations of the models in identifying various subtypes.</p></sec><sec id="s2-9"><title>Ethical Considerations</title><p>All histopathological images used in this study were obtained from publicly available databases (TCGA project and TCIA). All datasets underwent strict anonymization procedures before public release and complied with the relevant ethical policies and data sharing regulations. No new patient samples were collected during the course of this research. Therefore, additional ethics approval was not required. This study adhered to the ethical principles outlined in the Declaration of Helsinki and its subsequent amendments. 
Access to the image data is available under the open access policies of TCGA and TCIA or upon request from the corresponding author.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Effect Analysis of Data Preprocessing and Image Quality Enhancement</title><p>In this study, the quality of lymphoma pathological slide images directly affected the performance of the deep learning model. Therefore, multiple image preprocessing techniques, including Gaussian filtering for noise reduction, histogram equalization, and image normalization, were applied to improve image quality. <xref ref-type="fig" rid="figure1">Figure 1</xref> illustrates the visual differences between raw and preprocessed images. In the preprocessed images, background noise was substantially reduced, edges appeared sharper, and lesion regions became more prominent. These enhancements facilitated clearer visualization of pathological features and improved input consistency for model training. During subsequent deep learning model training, the preprocessed images exhibited higher segmentation accuracy and lower error rates. A detailed dataset analysis was also conducted to assess sample distribution. <xref ref-type="table" rid="table1">Table 1</xref> presents the number of samples in the training, validation, and test sets, along with the specific distribution of each class, providing crucial data support for subsequent model training.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Visualization of data preprocessing effects. 
This figure showcases the effects of different preprocessing stages on the same lymphoma image slide: (A) the original image; (B) the denoised image processed through Gaussian filtering; (C) the contrast-enhanced image after histogram equalization; and (D) the normalized image, showing clearer edges and details.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v14i1e72679_fig01.png"/></fig><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Distribution of dataset samples&#x2014;the number of samples in the training, validation, and test sets along with their corresponding categories.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Dataset</td><td align="left" valign="bottom">Number of immune cell samples</td><td align="left" valign="bottom">Number of epithelial cell samples</td><td align="left" valign="bottom">Number of matrix cell samples</td><td align="left" valign="bottom">Total</td></tr></thead><tbody><tr><td align="left" valign="top">Training set</td><td align="left" valign="top">4000</td><td align="left" valign="top">3000</td><td align="left" valign="top">2500</td><td align="left" valign="top">9500</td></tr><tr><td align="left" valign="top">Validation set</td><td align="left" valign="top">500</td><td align="left" valign="top">400</td><td align="left" valign="top">350</td><td align="left" valign="top">1250</td></tr><tr><td align="left" valign="top">Test set</td><td align="left" valign="top">500</td><td align="left" valign="top">400</td><td align="left" valign="top">350</td><td align="left" valign="top">1250</td></tr><tr><td align="left" valign="top">Total</td><td align="left" valign="top">5000</td><td align="left" valign="top">3800</td><td align="left" valign="top">3200</td><td align="left" valign="top">12,000</td></tr></tbody></table></table-wrap><p>To evaluate the impact of image preprocessing on model performance, 
a comparative experiment was conducted using both unprocessed and preprocessed images. The results showed that unprocessed images achieved only 77.12% (964/1250) accuracy in the segmentation task, with poor edge detection quality, slower model convergence, and a substantially higher error rate. In contrast, preprocessed images achieved a segmentation accuracy of 84.4% (1055/1250), demonstrating clearer delineation of lesion boundaries and exhibiting a noticeably faster training speed (Table S2 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). These findings highlight the importance of preprocessing steps such as Gaussian filtering, histogram equalization, and normalization in significantly enhancing segmentation performance.</p></sec><sec id="s3-2"><title>Performance of the U-Net Model in Image Segmentation</title><p>The application of the U-Net architecture in image segmentation demonstrated strong performance in accurately segmenting lesion areas in lymphoma pathological slides, significantly outperforming traditional methods. <xref ref-type="fig" rid="figure2">Figure 2</xref> illustrates the performance of the U-Net model in segmentation tasks. During training and validation, the U-Net model achieved a segmentation accuracy exceeding 85.04% (1063/1250) on the test set, with a recall rate of 88% (1100/1250). These results confirmed the capability of U-Net in handling complex medical images, particularly lymphoma pathological slides. Effective segmentation of lesion areas provided high-resolution input data for downstream tasks, including subtype classification and pathological grading.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>U-Net image segmentation results. This figure shows the performance of the U-Net model on the image segmentation task for lymphoma pathological slides. 
Segmented regions are marked in different colors to distinguish lesion areas from healthy tissue.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v14i1e72679_fig02.png"/></fig></sec><sec id="s3-3"><title>Analysis of the Synergistic Effects of Attention Mechanisms and ResNet</title><p>Integrating attention mechanisms with ResNet for feature extraction and classification markedly enhanced model performance. <xref ref-type="fig" rid="figure3">Figure 3</xref> shows the results of lymphoma subtype classification using the combined model. Incorporating the attention mechanism enabled the network to concentrate on diagnostically relevant image regions, thereby reducing errors in differentiating morphologically similar subtypes. Meanwhile, the residual structure of ResNet effectively mitigated the vanishing gradient problem and facilitated the learning of complex image features. Compared with traditional CNNs, the classification accuracy of the proposed deep learning model increased from 81.04% (1013/1250) to 91.04% (1138/1250), representing an improvement of approximately 10 percentage points.</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Analysis of attention mechanism and residual network (ResNet) synergy. This figure compares model performance in subtype classification tasks: (A) results of a traditional convolutional neural network and (B) results after incorporating attention mechanisms and ResNet, showing significant improvement in accuracy. CLL: chronic lymphocytic leukemia; FL: follicular lymphoma; MCL: mantle cell lymphoma.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v14i1e72679_fig03.png"/></fig></sec><sec id="s3-4"><title>Evaluation of Model Stability and Generalization Capability</title><p>To evaluate the stability and generalization capability of the model, 5-fold cross-validation was performed. 
The model demonstrated high consistency across the 5 independently partitioned training-validation subsets with only minor fluctuations in accuracy (<xref ref-type="fig" rid="figure4">Figure 4</xref>). During cross-validation (N=10,750 samples), the model achieved an average accuracy of 90% (9675/10,750), confirming its robustness and reliability. Even under small-sample conditions, the model maintained high predictive accuracy, indicating good generalization ability and strong resistance to overfitting.</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Analysis of model stability and generalization capability. This figure shows the accuracy changes during 5-fold cross-validation. The blue line represents the training set, and the red line represents the validation set. The figure shows stable and minimal fluctuations in model performance across different data splits.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v14i1e72679_fig04.png"/></fig></sec><sec id="s3-5"><title>Analysis of Lymphoma Subtype Classification Results</title><p>To evaluate the performance of the deep learning model in lymphoma subtype classification, classification results on the test set were analyzed using accuracy, precision-recall curves, and mAP as evaluation metrics. The results showed that the model achieved a classification accuracy of 98% (1225/1250) when distinguishing FL, chronic lymphocytic leukemia, and MCL. Additionally, the precision-recall curve demonstrated a high AUC, and the mAP reached 97%, indicating strong overall classification performance. Multiclass analysis revealed that most classification errors occurred between subtypes with similar morphological characteristics, such as FL and MCL (<xref ref-type="fig" rid="figure5">Figure 5</xref>). 
The incorporation of a feature enhancement module significantly improved the model&#x2019;s ability to extract critical features from key regions, leading to a nearly 5% increase in classification performance.</p><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>Evaluation of lymphoma subtype classification performance. This figure shows the model&#x2019;s performance in subtype classification, including (A) accuracy, (B) precision-recall curve, and (C) mean average precision (mAP), reflecting comprehensive performance in multiclass classification tasks. CLL: chronic lymphocytic leukemia; FL: follicular lymphoma; MCL: mantle cell lymphoma.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v14i1e72679_fig05.png"/></fig><p>Under 5-fold cross-validation (total sample size: n=10,750), the model achieved an average classification accuracy of 90% (9675/10,750; SD 1.9%), an <italic>F</italic><sub>1</sub>-score of 90% (9673/10,750; SD 2.3%), and an AUC of 0.95 (SD 0.015) across the 3 lymphoma subtypes, all statistically significant (<italic>P</italic>=0.004). In terms of subtype performance, the model demonstrated the highest sensitivity for FL at 94.2% (393/417), followed by chronic lymphocytic leukemia at 90.6% (378/417), whereas the performance for MCL was relatively lower at 87.3% (363/416), which may be attributed to the blurred boundaries and higher heterogeneity typically observed in MCL images. As this study primarily focused on model architecture and performance evaluation, interpretability tools such as gradient-weighted class activation mapping were not incorporated. 
Future work will include model interpretability analyses to enhance its clinical applicability.</p><p>To evaluate the superiority of the proposed model across different deep learning architectures, we systematically compared the fusion model with several mainstream CNN-based frameworks (Table S3 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). The results showed that traditional fully convolutional networks and SegNet underperformed in the detection of small lesions and boundary delineation, with overall accuracies of 81.04% (1013/1250) and 83.52% (1044/1250), respectively&#x2014;significantly lower than those of U-Net and its improved variants (<italic>P</italic>=0.006). DeepLabv3+ achieved improved segmentation accuracy (dice coefficient=83.9%) but suffered from increased training complexity and limited interpretability. In contrast, U-Net demonstrated stable performance in both segmentation and classification tasks (dice coefficient=85.5%; accuracy=1099/1250, 87.92%). The further incorporation of attention mechanisms and ResNet led to continuous performance gains, and the final fusion model achieved the best results in segmentation dice coefficient, classification accuracy, and AUC (dice coefficient=89.7%; accuracy=1150/1250, 92%; AUC=0.95), with statistically significant differences (<italic>P</italic>&#x003C;0.05). These findings indicate that the proposed fusion model provides a significant advantage in multisubtype classification and grading tasks.</p></sec><sec id="s3-6"><title>Comparative Analysis With Traditional Pathological Diagnosis Methods</title><p>Compared to traditional pathologists&#x2019; manual slide review methods, the deep learning model in this study demonstrated significant advantages in classification and grading tasks of lymphoma pathology images. 
Figure S7 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> shows a comparison of accuracy between the deep learning model and the average accuracy of pathologists. Under conditions involving small sample sizes or diagnostically complex subtypes, the model maintained consistent performance and achieved higher classification accuracy than pathologists. Particularly in images with rich details or blurred edges, the deep learning model reduced human error and improved diagnostic efficiency, highlighting its potential application in clinical practice. To ensure fairness in comparison, all participating pathologists were mid- to senior-level professionals with more than 5 years of diagnostic experience in hematopathology (Figure S7 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). Diagnoses were conducted in accordance with the World Health Organization classification guidelines and the expert consensus on pathological diagnosis of lymphoma. Each pathologist independently reviewed the same set of blinded slide images, and no communication was allowed during the review process. Final diagnostic results were determined through majority consensus. This selection protocol was designed to reflect the average diagnostic level of experienced pathologists in routine clinical practice.</p></sec><sec id="s3-7"><title>Model Optimization and Accuracy Improvement Strategies</title><p>To improve model accuracy and stability, a series of optimization strategies were implemented, including learning rate decay, data augmentation, and dropout regularization, which significantly improved model performance. <xref ref-type="fig" rid="figure6">Figure 6</xref> illustrates the performance changes of the model under different optimization strategies. The model gained greater diversity during training through the use of data augmentation, effectively improving its generalization capability. 
The incorporation of dropout layers effectively mitigated overfitting by reducing model reliance on specific neurons, allowing the network to maintain high accuracy on new datasets.</p><fig position="float" id="figure6"><label>Figure 6.</label><caption><p>Effects of model optimization on accuracy improvement. This figure illustrates the impact of optimization strategies (learning rate decay, data augmentation, and dropout regularization) on model training: (A) changes in the loss function before and after optimization and (B) changes in accuracy before and after optimization, showing improved accuracy.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v14i1e72679_fig06.png"/></fig></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>This study explored the application of deep learning techniques, particularly the U-Net model, in classifying and grading lymphoma pathological subtypes. The results showed that deep learning models significantly improved the efficiency and accuracy of automated lymphoma diagnosis. By integrating U-Net for image segmentation with ResNet for feature extraction, a robust diagnostic system was developed capable of differentiating Hodgkin lymphoma from non-Hodgkin lymphoma and accurately predicting lesion grades. Notably, the proposed deep learning model demonstrated significant advantages in segmentation accuracy, classification accuracy, and model stability. These results validated the potential of deep learning in medical image analysis, especially in disease diagnosis and clinical decision support systems.</p><p>U-Net exhibited outstanding performance in image segmentation tasks. Its precision in delineating lesion regions highlighted its ability to process complex pathological features. 
Lymphoma lesion regions often display irregular morphology and poorly defined boundaries, which limit the effectiveness of conventional manual segmentation methods. U-Net, through the hierarchical feature extraction capabilities of CNNs [<xref ref-type="bibr" rid="ref15">15</xref>], enabled automated and accurate segmentation of lesion areas while minimizing subjective variability. The model achieved an accuracy exceeding 85.04% (1063/1250) on the test set, significantly improving the efficiency of lesion recognition and extraction [<xref ref-type="bibr" rid="ref9">9</xref>].</p><p>However, despite the excellent performance of the deep learning model, several challenges were encountered, particularly regarding data scarcity and annotation quality. The acquisition of pathological images of lymphoma remains difficult, particularly for high-quality annotated data, which may impact model training effectiveness and generalization ability [<xref ref-type="bibr" rid="ref7">7</xref>]. To address this issue, this study used strategies such as data augmentation and cross-validation [<xref ref-type="bibr" rid="ref18">18</xref>] to improve the robustness and accuracy of the model. Nevertheless, inconsistencies in data quality, particularly the subjective differences among pathologists during annotation [<xref ref-type="bibr" rid="ref19">19</xref>], may still affect model training and prediction. Future work could benefit from the development of standardized annotation protocols and the construction of larger, high-quality datasets to further refine model performance.</p><p>To optimize model accuracy and stability, several techniques were incorporated, including attention mechanisms, ResNet-based residual learning, and comprehensive data augmentation. 
The use of attention modules enabled the model to more precisely focus on diagnostically relevant regions within the pathological images, thereby reducing the influence of irrelevant features and enhancing subtype classification accuracy. ResNet&#x2019;s residual structure effectively mitigated the common issue of vanishing gradients in deep networks, enhancing the model&#x2019;s ability to learn complex pathological features. These optimization strategies significantly improved the model&#x2019;s performance in lymphoma subtype classification, achieving a classification accuracy of 92% (1150/1250) and a grading accuracy of 89.04% (1113/1250).</p><p>Compared to traditional pathological diagnostic methods, deep learning models offer distinct advantages. Traditional pathology relies on pathologists&#x2019; experience and technical expertise, making the diagnostic process vulnerable to subjectivity and relatively inefficient when analyzing complex histological images [<xref ref-type="bibr" rid="ref20">20</xref>]. In contrast, deep learning models can process large volumes of images quickly and provide efficient and objective diagnostic results [<xref ref-type="bibr" rid="ref21">21</xref>]. Comparative analysis with manual slide review by pathologists revealed that deep learning models exhibited higher accuracy in handling complex and detail-rich images. Specifically, deep learning models reduced human error and improved diagnostic efficiency in lesion segmentation and subtype differentiation [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref23">23</xref>].</p><p>Despite the robust performance of deep learning models, several technical limitations remain. First, deep learning models&#x2019; &#x201C;black-box&#x201D; nature presents a significant challenge, especially in clinical settings, where physicians tend to favor interpretable diagnostic methods. 
Future research should focus on enhancing model interpretability by using visualization techniques and traceability analysis, allowing clinicians to better understand the rationale behind model predictions. Second, although the model achieved high accuracy in feature extraction and classification tasks, minor errors may still occur in subtype classification and grading, particularly when morphological differences between subtypes are subtle. Improving the model&#x2019;s performance in analyzing complex pathological images remains a key direction for future research.</p><p>In summary, this study confirmed the immense potential of deep learning technologies in lymphoma pathological image analysis and provides robust technical support for the automated diagnosis of clinical lymphoma. With ongoing advancements in computational methods, the application of deep learning in medical imaging analysis will become more extensive and profound. Such progress will play a vital role in developing personalized treatment plans, evaluating therapeutic efficacy, and improving patient survival rates, thereby providing substantial clinical value. The successful application of the proposed approach not only accelerated the diagnostic workflow but also provided clinicians with precise and efficient diagnostic tools, promoting greater automation and enhanced analytical capability in pathological image analysis.</p></sec><sec id="s4-2"><title>Conclusions</title><p>This study demonstrated the significant potential of deep learning techniques, particularly the U-Net model, in lymphoma pathological subtype classification and grading. By combining U-Net for image segmentation and ResNet for feature extraction, an efficient and accurate diagnostic framework was developed capable of distinguishing Hodgkin lymphoma from non-Hodgkin lymphoma and precisely predicting the grades of lesions. 
The proposed model achieved strong performance in segmentation accuracy, classification accuracy, and model stability, validating the potential of deep learning technologies in medical imaging analysis, especially in disease diagnosis and clinical decision support.</p><p>As the core of this research, the U-Net model exhibited exceptional performance in segmenting pathological regions, effectively addressing challenges such as irregular morphology and blurred boundaries in lymphoma tissues. These capabilities significantly improved segmentation efficiency and accuracy, providing high-quality input data for subsequent classification and grading tasks. Additionally, integrating attention mechanisms and ResNet further optimized feature extraction and classification capabilities, achieving a classification accuracy of 92% (1150/1250) and a grading accuracy of 89.04% (1113/1250).</p><p>Despite the encouraging results, certain limitations remain. The limited availability of annotated lymphoma pathological images and inconsistencies in labeling quality may affect the model&#x2019;s training effectiveness and generalizability. Although data augmentation and cross-validation were used to improve robustness, variability in annotation and the scarcity of high-quality labeled data continue to pose challenges. In addition, the deep learning model&#x2019;s &#x201C;black-box&#x201D; nature limits its interpretability, which affects clinical acceptance and the transparency of decision-making. Moreover, the dataset used in this study was primarily derived from publicly available databases with a relatively limited sample size and without multicenter or real-world clinical validation, which may introduce sampling bias and increase the risk of overfitting. The model also exhibited relatively lower recognition performance for morphologically ambiguous subtypes such as MCL, suggesting that further optimization is needed to address highly heterogeneous lesions. 
Future work should focus on expanding the dataset with multicenter cohorts, integrating the model into digital pathology workflows as a clinical decision support tool, enhancing interpretability through visualization-based modules, and addressing ethical and regulatory challenges to ensure the safe and responsible application of AI technologies in health care settings.</p><p>In summary, this study demonstrated the outstanding performance of U-Net&#x2013;based deep learning models in analyzing lymphoma pathological images. These techniques significantly improved diagnostic efficiency and accuracy while offering clinicians effective and reliable decision support tools. As AI technology continues to evolve, deep learning models are expected to play an increasingly important role in medical imaging analysis, offering robust support for developing personalized treatment plans and enhancing patient outcomes, thereby advancing medical imaging analysis toward greater automation and intelligence as shown in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>.</p></sec></sec></body><back><notes><sec><title>Funding</title><p>This study was supported by the Fundamental Research Program of Shanxi Province (202203021222388).</p></sec><sec><title>Data Availability</title><p>The datasets generated or analyzed during this study are available from the corresponding author on reasonable request.</p></sec></notes><fn-group><fn fn-type="con"><p>LS conceived and designed the study. JZ, XW, and LM performed the experiments and analyzed the data. JZ and XW wrote the manuscript. 
All authors reviewed and approved the final version of the manuscript.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">AUC</term><def><p>area under the curve</p></def></def-item><def-item><term id="abb3">CNN</term><def><p>convolutional neural network</p></def></def-item><def-item><term id="abb4">FL</term><def><p>follicular lymphoma</p></def></def-item><def-item><term id="abb5">mAP</term><def><p>mean average precision</p></def></def-item><def-item><term id="abb6">MCL</term><def><p>mantle cell lymphoma</p></def></def-item><def-item><term id="abb7">ResNet</term><def><p>residual network</p></def></def-item><def-item><term id="abb8">TCGA</term><def><p>The Cancer Genome Atlas</p></def></def-item><def-item><term id="abb9">TCIA</term><def><p>The Cancer Imaging Archive</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pichler</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Amador</surname><given-names>C</given-names> </name><name name-style="western"><surname>Fujimoto</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Advances in peripheral T cell lymphomas: pathogenesis, genetic landscapes and emerging therapeutic targets</article-title><source>Histopathology</source><year>2025</year><month>01</month><volume>86</volume><issue>1</issue><fpage>119</fpage><lpage>133</lpage><pub-id pub-id-type="doi">10.1111/his.15376</pub-id><pub-id pub-id-type="medline">39679758</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Al-Maghrabi</surname><given-names>H</given-names> </name><name name-style="western"><surname>Al-Maghrabi</surname><given-names>J</given-names> </name></person-group><article-title>Non-Hodgkin&#x2019;s primary lymphoma involving the genitourinary tract: histopathological experience from two tertiary hospitals, western region, Saudi Arabia</article-title><source>Am J Clin Exp Urol</source><year>2024</year><volume>12</volume><issue>5</issue><fpage>288</fpage><lpage>295</lpage><pub-id pub-id-type="doi">10.62347/GFNJ2400</pub-id><pub-id pub-id-type="medline">39584011</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Qiu</surname><given-names>L</given-names> </name><name name-style="western"><surname>Medeiros</surname><given-names>LJ</given-names> </name><name name-style="western"><surname>Li</surname><given-names>S</given-names> </name></person-group><article-title>High-grade B-cell lymphomas: double hit and non-double hit</article-title><source>Hum Pathol</source><year>2025</year><month>02</month><volume>156</volume><fpage>105700</fpage><pub-id pub-id-type="doi">10.1016/j.humpath.2024.105700</pub-id><pub-id pub-id-type="medline">39603365</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Doeleman</surname><given-names>T</given-names> </name><name name-style="western"><surname>Hondelink</surname><given-names>LM</given-names> </name><name name-style="western"><surname>Vermeer</surname><given-names>MH</given-names> </name><name name-style="western"><surname>van Dijk</surname><given-names>MR</given-names> </name><name name-style="western"><surname>Schrader</surname><given-names>AM</given-names> </name></person-group><article-title>Artificial intelligence in digital pathology of cutaneous lymphomas: 
a review of the current state and future perspectives</article-title><source>Semin Cancer Biol</source><year>2023</year><month>09</month><volume>94</volume><fpage>81</fpage><lpage>88</lpage><pub-id pub-id-type="doi">10.1016/j.semcancer.2023.06.004</pub-id><pub-id pub-id-type="medline">37331571</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kuker</surname><given-names>RA</given-names> </name><name name-style="western"><surname>Lehmkuhl</surname><given-names>D</given-names> </name><name name-style="western"><surname>Kwon</surname><given-names>D</given-names> </name><etal/></person-group><article-title>A deep learning-aided automated method for calculating metabolic tumor volume in diffuse large B-cell lymphoma</article-title><source>Cancers (Basel)</source><year>2022</year><month>10</month><day>25</day><volume>14</volume><issue>21</issue><fpage>5221</fpage><pub-id pub-id-type="doi">10.3390/cancers14215221</pub-id><pub-id pub-id-type="medline">36358642</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Haghofer</surname><given-names>A</given-names> </name><name name-style="western"><surname>Fuchs-Baumgartinger</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lipnik</surname><given-names>K</given-names> </name><etal/></person-group><article-title>Histological classification of canine and feline lymphoma using a modular approach based on deep learning and advanced image processing</article-title><source>Sci Rep</source><year>2023</year><month>11</month><day>9</day><volume>13</volume><issue>1</issue><fpage>19436</fpage><pub-id pub-id-type="doi">10.1038/s41598-023-46607-w</pub-id><pub-id pub-id-type="medline">37945699</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Naji</surname><given-names>H</given-names> </name><name name-style="western"><surname>Sancere</surname><given-names>L</given-names> </name><name name-style="western"><surname>Simon</surname><given-names>A</given-names> </name><etal/></person-group><article-title>HoLy-Net: segmentation of histological images of diffuse large B-cell lymphoma</article-title><source>Comput Biol Med</source><year>2024</year><month>03</month><volume>170</volume><fpage>107978</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2024.107978</pub-id><pub-id pub-id-type="medline">38237235</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yang</surname><given-names>YF</given-names> </name><name name-style="western"><surname>Zhao</surname><given-names>E</given-names> </name><name name-style="western"><surname>Shi</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>H</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>YY</given-names> </name></person-group><article-title>Multicenter investigation of preoperative distinction between primary central nervous system lymphomas and glioblastomas through interpretable artificial intelligence models</article-title><source>Neuroradiology</source><year>2024</year><month>11</month><volume>66</volume><issue>11</issue><fpage>1893</fpage><lpage>1906</lpage><pub-id pub-id-type="doi">10.1007/s00234-024-03451-7</pub-id><pub-id pub-id-type="medline">39225815</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shi</surname><given-names>T</given-names> </name><name 
name-style="western"><surname>Jiang</surname><given-names>H</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>M</given-names> </name><name name-style="western"><surname>Diao</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>G</given-names> </name><name name-style="western"><surname>Yao</surname><given-names>YD</given-names> </name></person-group><article-title>Metabolic anomaly appearance aware U-net for automatic lymphoma segmentation in whole-body PET/CT scans</article-title><source>IEEE J Biomed Health Inform</source><year>2023</year><month>05</month><volume>27</volume><issue>5</issue><fpage>2465</fpage><lpage>2476</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2023.3248099</pub-id><pub-id pub-id-type="medline">37027631</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Weisman</surname><given-names>AJ</given-names> </name><name name-style="western"><surname>Kieler</surname><given-names>MW</given-names> </name><name name-style="western"><surname>Perlman</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Comparison of 11 automated PET segmentation methods in lymphoma</article-title><source>Phys Med Biol</source><year>2020</year><month>11</month><day>27</day><volume>65</volume><issue>23</issue><fpage>235019</fpage><pub-id pub-id-type="doi">10.1088/1361-6560/abb6bd</pub-id><pub-id pub-id-type="medline">32906088</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Isavand</surname><given-names>P</given-names> </name><name name-style="western"><surname>Aghamiri</surname><given-names>SS</given-names> </name><name name-style="western"><surname>Amin</surname><given-names>R</given-names> 
</name></person-group><article-title>Applications of multimodal artificial intelligence in non-Hodgkin lymphoma B cells</article-title><source>Biomedicines</source><year>2024</year><month>08</month><day>5</day><volume>12</volume><issue>8</issue><fpage>1753</fpage><pub-id pub-id-type="doi">10.3390/biomedicines12081753</pub-id><pub-id pub-id-type="medline">39200217</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Steinbuss</surname><given-names>G</given-names> </name><name name-style="western"><surname>Kriegsmann</surname><given-names>M</given-names> </name><name name-style="western"><surname>Zgorzelski</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Deep learning for the classification of non-Hodgkin lymphoma on histopathological images</article-title><source>Cancers (Basel)</source><year>2021</year><month>05</month><day>17</day><volume>13</volume><issue>10</issue><fpage>2419</fpage><pub-id pub-id-type="doi">10.3390/cancers13102419</pub-id><pub-id pub-id-type="medline">34067726</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tran</surname><given-names>KA</given-names> </name><name name-style="western"><surname>Kondrashova</surname><given-names>O</given-names> </name><name name-style="western"><surname>Bradley</surname><given-names>A</given-names> </name><name name-style="western"><surname>Williams</surname><given-names>ED</given-names> </name><name name-style="western"><surname>Pearson</surname><given-names>JV</given-names> </name><name name-style="western"><surname>Waddell</surname><given-names>N</given-names> </name></person-group><article-title>Deep learning in cancer diagnosis, prognosis and treatment selection</article-title><source>Genome 
Med</source><year>2021</year><month>09</month><day>27</day><volume>13</volume><issue>1</issue><fpage>152</fpage><pub-id pub-id-type="doi">10.1186/s13073-021-00968-x</pub-id><pub-id pub-id-type="medline">34579788</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Schmidt-Barbo</surname><given-names>P</given-names> </name><name name-style="western"><surname>Kalweit</surname><given-names>G</given-names> </name><name name-style="western"><surname>Naouar</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Detection of disease-specific signatures in B cell repertoires of lymphomas using machine learning</article-title><source>PLoS Comput Biol</source><year>2024</year><month>07</month><volume>20</volume><issue>7</issue><fpage>e1011570</fpage><pub-id pub-id-type="doi">10.1371/journal.pcbi.1011570</pub-id><pub-id pub-id-type="medline">38954728</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jiang</surname><given-names>C</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>K</given-names> </name><name name-style="western"><surname>Teng</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>Deep learning-based tumour segmentation and total metabolic tumour volume prediction in the prognosis of diffuse large B-cell lymphoma patients in 3D FDG-PET images</article-title><source>Eur Radiol</source><year>2022</year><month>07</month><volume>32</volume><issue>7</issue><fpage>4801</fpage><lpage>4812</lpage><pub-id pub-id-type="doi">10.1007/s00330-022-08573-1</pub-id><pub-id pub-id-type="medline">35166895</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Jiang</surname><given-names>H</given-names> </name><name name-style="western"><surname>Diao</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Shi</surname><given-names>T</given-names> </name><etal/></person-group><article-title>A review of deep learning-based multiple-lesion recognition from medical images: classification, detection and segmentation</article-title><source>Comput Biol Med</source><year>2023</year><month>05</month><volume>157</volume><fpage>106726</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2023.106726</pub-id><pub-id pub-id-type="medline">36924732</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hamdi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Senan</surname><given-names>EM</given-names> </name><name name-style="western"><surname>Jadhav</surname><given-names>ME</given-names> </name><name name-style="western"><surname>Olayah</surname><given-names>F</given-names> </name><name name-style="western"><surname>Awaji</surname><given-names>B</given-names> </name><name name-style="western"><surname>Alalayah</surname><given-names>KM</given-names> </name></person-group><article-title>Hybrid models based on fusion features of a CNN and handcrafted features for accurate histopathological image analysis for diagnosing malignant lymphomas</article-title><source>Diagnostics (Basel)</source><year>2023</year><month>07</month><day>4</day><volume>13</volume><issue>13</issue><fpage>2258</fpage><pub-id pub-id-type="doi">10.3390/diagnostics13132258</pub-id><pub-id pub-id-type="medline">37443652</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Srisuwananukorn</surname><given-names>A</given-names> </name><name name-style="western"><surname>Salama</surname><given-names>ME</given-names> </name><name name-style="western"><surname>Pearson</surname><given-names>AT</given-names> </name></person-group><article-title>Deep learning applications in visual data for benign and malignant hematologic conditions: a systematic review and visual glossary</article-title><source>Haematologica</source><year>2023</year><volume>108</volume><issue>8</issue><fpage>1993</fpage><lpage>2010</lpage><pub-id pub-id-type="doi">10.3324/haematol.2021.280209</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Huang</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Guo</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Multi-scale feature similarity-based weakly supervised lymphoma segmentation in PET/CT images</article-title><source>Comput Biol Med</source><year>2022</year><month>12</month><volume>151</volume><fpage>106230</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.106230</pub-id><pub-id pub-id-type="medline">36306574</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Schoenpflug</surname><given-names>LA</given-names> </name><name name-style="western"><surname>Chatzipli</surname><given-names>A</given-names> </name><name name-style="western"><surname>Sirinukunwattana</surname><given-names>K</given-names> </name><etal/></person-group><article-title>Tumour purity assessment with deep learning in colorectal cancer and impact on molecular analysis</article-title><source>J 
Pathol</source><year>2025</year><month>02</month><volume>265</volume><issue>2</issue><fpage>184</fpage><lpage>197</lpage><pub-id pub-id-type="doi">10.1002/path.6376</pub-id><pub-id pub-id-type="medline">39710952</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Huang</surname><given-names>H</given-names> </name><name name-style="western"><surname>Yan</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Li</surname><given-names>B</given-names> </name><etal/></person-group><article-title>LungPath: artificial intelligence-driven histologic pattern recognition for improved diagnosis of early-stage invasive lung adenocarcinoma</article-title><source>Transl Lung Cancer Res</source><year>2024</year><month>08</month><day>31</day><volume>13</volume><issue>8</issue><fpage>1816</fpage><lpage>1827</lpage><pub-id pub-id-type="doi">10.21037/tlcr-24-258</pub-id><pub-id pub-id-type="medline">39263012</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ning</surname><given-names>X</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>R</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Development of a deep learning-based model to diagnose mixed-type gastric cancer accurately</article-title><source>Int J Biochem Cell Biol</source><year>2023</year><month>09</month><volume>162</volume><fpage>106452</fpage><pub-id pub-id-type="doi">10.1016/j.biocel.2023.106452</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Khalid</surname><given-names>M</given-names> </name><name 
name-style="western"><surname>Deivasigamani</surname><given-names>S</given-names> </name><name name-style="western"><surname>V</surname><given-names>S</given-names> </name><name name-style="western"><surname>Rajendran</surname><given-names>S</given-names> </name></person-group><article-title>An efficient colorectal cancer detection network using atrous convolution with coordinate attention transformer and histopathological images</article-title><source>Sci Rep</source><year>2024</year><month>08</month><day>17</day><volume>14</volume><issue>1</issue><fpage>19109</fpage><pub-id pub-id-type="doi">10.1038/s41598-024-70117-y</pub-id><pub-id pub-id-type="medline">39154091</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Supplementary figures and tables.</p><media xlink:href="medinform_v14i1e72679_app1.docx" xlink:title="DOCX File, 523 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Visual abstract.</p><media xlink:href="medinform_v14i1e72679_app2.docx" xlink:title="DOCX File, 357 KB"/></supplementary-material></app-group></back></article>