<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Med Inform</journal-id><journal-id journal-id-type="publisher-id">medinform</journal-id><journal-id journal-id-type="index">7</journal-id><journal-title>JMIR Medical Informatics</journal-title><abbrev-journal-title>JMIR Med Inform</abbrev-journal-title><issn pub-type="epub">2291-9694</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v14i1e81181</article-id><article-id pub-id-type="doi">10.2196/81181</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Identification and Localization of Breast Tumor Components via a Convolutional Neural Network Based on High-Frequency Ultrasound Combined With Histopathologic Registration: Prospective Study</article-title></title-group><contrib-group><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Yao</surname><given-names>Jia-Qian</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Zhou</surname><given-names>Wen-Wen</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author"><name 
name-style="western"><surname>Chai</surname><given-names>Zhi-Fei</given-names></name><degrees>ME</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Ren</surname><given-names>Fei</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Huang</surname><given-names>Tong-Yi</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Zhen</surname><given-names>Tian-Tian</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Shi</surname><given-names>Hui-Juan</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Xie</surname><given-names>Xiao-Yan</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Zhao</surname><given-names>Ze</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Xu</surname><given-names>Ming</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Medical Ultrasonics, The First Affiliated Hospital, Sun Yat-sen University</institution><addr-line>58 Zhongshan 2nd Road</addr-line><addr-line>Guangzhou</addr-line><country>China</country></aff><aff id="aff2"><institution>Department of Medical Ultrasonics, Suzhou Municipal Hospital Affiliated with Nanjing Medical University</institution><addr-line>Suzhou</addr-line><country>China</country></aff><aff 
id="aff3"><institution>Institute of Computing Technology, Chinese Academy of Sciences</institution><addr-line>Beijing</addr-line><country>China</country></aff><aff id="aff4"><institution>Department of Pathology, The First Affiliated Hospital, Sun Yat-sen University</institution><addr-line>Guangzhou</addr-line><country>China</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Coristine</surname><given-names>Andrew</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Yongping</surname><given-names>Liang</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Bhatnagar</surname><given-names>Priyanshi</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Ming Xu, MD, PhD, Department of Medical Ultrasonics, The First Affiliated Hospital, Sun Yat-sen University, 58 Zhongshan 2nd Road, Guangzhou, 510080, China, +86-020-8776 518; <email>xuming8@mail.sysu.edu.cn</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>these authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>23</day><month>1</month><year>2026</year></pub-date><volume>14</volume><elocation-id>e81181</elocation-id><history><date date-type="received"><day>23</day><month>07</month><year>2025</year></date><date date-type="accepted"><day>17</day><month>12</month><year>2025</year></date></history><copyright-statement>&#x00A9; Jia-Qian Yao, Wen-Wen Zhou, Zhi-Fei Chai, Fei Ren, Tong-Yi Huang, Tian-Tian Zhen, Hui-Juan Shi, Xiao-Yan Xie, Ze Zhao, Ming Xu. Originally published in JMIR Medical Informatics (<ext-link ext-link-type="uri" xlink:href="https://medinform.jmir.org">https://medinform.jmir.org</ext-link>), 23.1.2026. 
</copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Informatics, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://medinform.jmir.org/">https://medinform.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://medinform.jmir.org/2026/1/e81181"/><abstract><sec><title>Background</title><p>Given the highly heterogeneous biology of breast cancer, a more effective noninvasive diagnostic tool that unravels microscopic histopathology patterns is urgently needed.</p></sec><sec><title>Objective</title><p>This study aims to identify cancerous regions in ultrasound images of breast cancer via convolutional neural network based on registered grayscale ultrasound images and readily accessible biopsy whole slide images (WSIs).</p></sec><sec sec-type="methods"><title>Methods</title><p>This single-center study prospectively included participants undergoing ultrasound-guided core needle biopsy procedures for Breast Imaging Reporting and Data System category 4 or 5 breast lesions for whom breast cancer was pathologically confirmed from July 2022 to February 2023 consecutively. The basic information, ultrasound image data, biopsy tissue specimens, and corresponding WSIs were collected. 
After core needle biopsy procedures, the stained breast tissue specimens were sliced and coregistered with an ultrasound image of a needle tract. Convolutional neural network models for identifying breast cancer cells in ultrasound images were developed using FCN-101 and DeepLabV3 networks. The image-level predictive performance was evaluated and compared quantitatively by pixel accuracy, Dice similarity coefficient, and recall. Pixel-level classification was illustrated through confusion matrices. The cancerous region in the testing dataset was further visualized in ultrasound images. Potential clinical applications were qualitatively assessed by comparing the automatic segmentation results and the actual pathological tissue distributions.</p></sec><sec sec-type="results"><title>Results</title><p>A total of 105 participants with 386 ultrasound images of breast cancer were included, with 270 (70%), 78 (20.2%), and 38 (9.8%) images in the training, validation, and test datasets, respectively. Both models performed well in predicting the cancerous regions in the biopsy area, whereas the FCN-101 model was superior to the DeepLabV3 model in terms of pixel accuracy (86.91% vs 69.55%; <italic>P</italic>=.002) and Dice similarity coefficient (77.47% vs 69.90%; <italic>P</italic>&#x003C;.001). The two models yielded recall values of 54.64% and 58.46%, with no significant difference between them (<italic>P</italic>=.80). Furthermore, the FCN-101 model had an advantage in predicting cancerous regions, while the DeepLabV3 model achieved more accurate predictive pixels in normal tissue (both <italic>P</italic>&#x003C;.05). Visualization of cancerous regions on grayscale ultrasound images demonstrated high consistency with those identified on WSIs.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>The technique for spatial registration of breast WSIs and ultrasound images of a needle tract was established. 
Breast cancer regions were accurately identified and localized on a pixel level in high-frequency ultrasound images via an advanced convolutional neural network with histopathologic WSI as the reference standard.</p></sec></abstract><kwd-group><kwd>breast</kwd><kwd>neoplasms</kwd><kwd>biopsy</kwd><kwd>ultrasonography</kwd><kwd>registration</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Breast cancer heterogeneity has induced challenges in treatment planning and follow-up management, which leads to unfavorable outcomes [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. Currently, ultrasound is a widely used diagnostic tool for breast cancer management, particularly valuable in screening, positive diagnosis, and treatment response assessment [<xref ref-type="bibr" rid="ref3">3</xref>]. However, the biological heterogeneity of breast cancers leads to varied morphological features on ultrasound [<xref ref-type="bibr" rid="ref4">4</xref>], often resulting in malignancy underestimation and overestimation. Furthermore, there is a heterogeneous response to treatment among patients with breast cancer [<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref7">7</xref>]. Curative effect assessment secondary to preoperative neoadjuvant treatment is largely based on cancer volume changes [<xref ref-type="bibr" rid="ref8">8</xref>], as well as biopsy for further validation [<xref ref-type="bibr" rid="ref9">9</xref>]. 
A more accurate noninvasive diagnostic tool that indicates living cancer cells in breast cancer is urgently needed [<xref ref-type="bibr" rid="ref10">10</xref>-<xref ref-type="bibr" rid="ref12">12</xref>].</p><p>Hematoxylin and eosin (H&#x0026;E) staining of breast tissue captured via core needle biopsy (CNB) has been introduced to reflect the underlying cellular and molecular information [<xref ref-type="bibr" rid="ref13">13</xref>-<xref ref-type="bibr" rid="ref15">15</xref>]. Preoperative diagnosis and curative effect assessment of breast cancer can be undermined by insufficient and nonrepresentative tissue owing to the heterogeneous distribution of breast cancer [<xref ref-type="bibr" rid="ref16">16</xref>]. Likewise, the partial samples obtained by CNB may not represent the entire lesion [<xref ref-type="bibr" rid="ref17">17</xref>]. There remains a need for standardized methods or imaging biomarkers available for accurately localizing histopathological cancerous subregions.</p><p>The convolutional neural network (CNN), a developed type of deep learning algorithm, has shown remarkable performance in correlating macroscopic imaging and microscopic histopathologic microstructure. A previous study showed that a multimodal radiomics model combining ultrasound and whole slide image (WSI) can effectively distinguish between luminal and nonluminal breast cancers [<xref ref-type="bibr" rid="ref18">18</xref>]. Other studies have explored using the deep learning algorithm for correlation between magnetic resonance imaging (MRI) and whole-mount specimen images to localize prostate cancer [<xref ref-type="bibr" rid="ref19">19</xref>]. Theoretically, these approaches may also be applicable in the ultrasound identification and localization of cancerous regions in breast cancer. Nonetheless, few studies have focused on this aspect. 
The correlation of ultrasound modality and readily accessible biopsy WSI remains open to question.</p><p>Therefore, this study aims to identify and localize cancerous regions in breast cancer based on a CNN algorithm that integrates high-frequency ultrasound (HFUS) images with WSI histopathology. The predictive performance of the model will be assessed.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Study Population</title><p>Consenting participants were recruited between July 2022 and February 2023 and were included as the training, validation, and test population. The eligibility criteria included the following: (1) index lesion was defined as category 4 or 5 according to the fifth edition of the American College of Radiology Breast Imaging Reporting and Data System (BI-RADS) of ultrasound [<xref ref-type="bibr" rid="ref20">20</xref>]; (2) index lesion was visible on HFUS, and complete imaging data were stored; (3) underwent ultrasound-guided breast lesion biopsy and the histopathology indicated breast cancer. Participants were excluded if (1) the biopsy specimen was incomplete or inaccessible; (2) the breast ultrasound images were incomplete; (3) there was a history of treatment for breast cancer (surgery, antihormonal therapy, immunotherapy, and radiation therapy); (4) the pathologic diagnosis was incomplete. The study sample included 163 consecutive participants undergoing ultrasound-guided breast biopsy for suspicion of cancer. Of these, three participants whose ultrasound images had poor quality and one who did not provide research consent were excluded. Participants with incomplete, fractional tissue specimens (n=2) and benign histology reports (n=52) were also excluded.</p></sec><sec id="s2-2"><title>Ultrasound and CNB Examination</title><p>The overall design of this study is shown in <xref ref-type="fig" rid="figure1">Figure 1</xref>. 
Expert radiologists with at least 10 years of experience performed breast ultrasound examinations and ultrasound-guided CNB procedures following the standard practice protocol. Location, number, and morphologic characteristics (size, shape, orientation, margin, echo pattern, vascularity, and calcifications) of lesions were identified and categorizations were assigned by the expert radiologists according to the fifth edition of ultrasound BI-RADS [<xref ref-type="bibr" rid="ref20">20</xref>]. An Aplio i900 Ultrasound System (Canon) with an i24LX8 high-frequency linear probe (frequency range: 8.0&#x2010;18.0 MHz) was used to generate breast ultrasound images. All images were stored in DICOM format for subsequent analysis.</p></sec><sec id="s2-3"><title>WSI Acquisition</title><p>When breast cancer was suspected, an ultrasound-guided CNB procedure was performed by expert radiologists to determine the diagnosis. MAGNUM biopsy instruments (BARD) with disposable core tissue biopsy needles (16G, 22 mm, MN1620, BARD) were adopted. During the procedure, the needle tip was positioned approximately 0.5 cm from the target biopsy region. The radiologists aligned the ultrasound probe and needle to visualize the entire needle tract. Subtle needle deflections were occasionally observed. These deflections were dynamically corrected in real time by adjusting the needle trajectory. The radiologists captured two B-mode HFUS images per biopsy for subsequent registration, including one pre-fire and one post-fire. To ensure the specimens were representative, biopsies were taken by the radiologist from different regions of the lesion, typically 4&#x2010;6 samples, focusing on solid areas on B-mode ultrasound or areas with abundant vascularity on color Doppler flow imaging. When multiple lesions were encountered, the most suspicious lesion for malignancy was chosen for analysis. 
To facilitate the follow-up spatial registration of ultrasound and histopathologic images, the needle tip side of the biopsy specimen from the index lesion was stained with biological tissue dye (BIOGNOST), depicted in Figure S1 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>. After 2 to 5 minutes of coloration, the biopsy specimens were placed in 10% neutral buffered formalin for fixation and sent for histopathologic examination.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Workflow of the proposed cancerous regions identification protocol in this study. First, we got cropped HFUS images and corresponding spatially aligned biopsy WSIs. Second, a registration process was applied to achieve anatomic correlation. Third, the segmentation model was constructed using the FCN-101 and DeepLabV3 architectures. AI: artificial intelligence; HFUS: high-frequency ultrasound; ROI: region of interest; WSI: whole slide image.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v14i1e81181_fig01.png"/></fig><p>The tissue samples were fixed and oriented during the embedding process to preserve the longitudinal axis. Each 3 &#x03BC;m histological slide was then sectioned parallel to the initial needle trajectory, thereby ensuring that the analyzed WSI represented the same gross anatomical plane captured by HFUS. The tissue strips were very small, thus the dimensional changes and distortion introduced by histological processing were assumed to be limited. Each section was stained with H&#x0026;E. For each participant, all slides stained with H&#x0026;E were reviewed by two experienced breast pathologists (with 5 y and 10 y of experience), and the histopathologic type was reported. Scanning of the H&#x0026;E slides was performed using a KFBIO Digital Pathology Slide Scanner (KF-PRO-020) with a 200X objective lens. 
Representative heterogeneous cancer cell distribution of breast cancer in biopsy WSI is depicted in Figure S2 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p></sec><sec id="s2-4"><title>Imaging Registration</title><p>The expert pathologist with 10 years of experience used the open-source software QuPath (version 0.4.0) for digital histopathology analysis. The maximal cross-section of the extracted core was used for analysis. Cancer cells were identified based on nuclear atypia and mitotic figures. The regions of interest (ROIs) of cancerous regions were manually outlined on all high-resolution WSI slices, generating a per-pixel cancer cell labeling (<xref ref-type="fig" rid="figure2">Figure 2</xref>).</p><p>For 4&#x2010;6 samples from one lesion, the expert pathologist selected 1&#x2010;4 specimens for further registration processing. The selection method was based on three criteria: (1) intact and well-formed; (2) length matches the needle notch (22 mm); (3) clear and distinguishable staining. For each selected tissue sample, one radiologist with at least 5 years of experience correlated the WSI to two captured B-mode HFUS images. To ensure the spatial registration from WSI to the HFUS images, the radiologist compared the pre-fire and post-fire HFUS images using Photoshop software (version CC 2019; Adobe Inc). In the software, the needle tip position in the pre-fire image was taken as the starting point; the actual length of the biopsy specimen was determined by the needle projection distance in the post-fire image. Then, the radiologist cropped the biopsy area in the pre-fire image for subsequent annotation and analysis. The detailed image cropping process is demonstrated in <xref ref-type="fig" rid="figure2">Figure 2</xref>. The HFUS image of the needle tract was then saved in JPG format. 
The ROIs outlined in WSIs were converted to correlated HFUS images via Labelme software, an open-source image annotation tool (version 5.1.0).</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Spatial registration and mapping protocol between HFUS and WSI. We compared the pre-fire image and post-fire image to crop the needle tract in HFUS images. An experienced pathologist manually outlined the cancerous cells on WSI images using QuPath software (version 0.4.0 [<xref ref-type="bibr" rid="ref21">21</xref>]), which were transferred to the cropped HFUS images, creating the labeled dataset for training. HFUS: high-frequency ultrasound; ROI: region of interest; WSI: whole slide image.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v14i1e81181_fig02.png"/></fig></sec><sec id="s2-5"><title>Data Preprocessing</title><p>The HFUS images and their ROIs annotation were obtained to establish the CNN prediction model. Open-source libraries including Python (version 3.8.11; Python Software Foundation), Imgviz (version 1.5.1), and Numpy (version 1.21.6) were used to convert the dataset into the standard VOC format. To enhance the robustness and generalization capability of the model, data augmentation strategies, such as random horizontal flipping, rotation, and pixel transformations, were used to increase the generalizability of the model. The dataset was randomly divided into training (n=270, 70%), validation (n=78, 20.2%), and test (n=38, 9.8%) subsets using a 7:2:1 ratio with no overlap between the subsets. Image pixel values were normalized using the <italic>z</italic>-score method to reduce computation burden and accelerate model convergence.</p></sec><sec id="s2-6"><title>Model Development</title><p>The fully automated segmentation CNN model was designed to identify cancerous regions in breast cancer based on HFUS images, with the ROIs in WSI as the reference standard. 
The independent CNN model was trained with advanced FCN-101 [<xref ref-type="bibr" rid="ref22">22</xref>] and DeepLabV3 [<xref ref-type="bibr" rid="ref23">23</xref>] networks as the backbone, separately. During training, the models were adapted using the AdamW optimization algorithm and CosineAnnealing learning rate adjustment curve to improve efficiency. The weighted Dice loss function was used to mitigate the data imbalance during the training process, as plotted in Figure S3 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>. The iteration parameters with the lowest loss values were chosen, which were 0.6626 for the FCN-101 model and 0.7285 for the DeepLabV3 model. In the fine-tuning process, parameters were selected to construct the segmentation model when the best performance on the test dataset was achieved. Finally, cancerous regions in breast cancer were localized. More details on the CNN model are provided in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>. The source code for our CNN models is available on GitHub [<xref ref-type="bibr" rid="ref24">24</xref>].</p></sec><sec id="s2-7"><title>Statistical Analysis</title><p>All statistical analysis was conducted with IBM SPSS Statistics 25.0 (version 25.0; IBM Corp), Python (version 3.8.11; Python Software Foundation), and R software (version 4.2.1, R Foundation for Statistical Computing). Continuous variables were exhibited as means (SDs) or medians and IQRs and compared using a 1-sample <italic>t</italic> test or Mann-Whitney <italic>U</italic> test where appropriate. Categorical variables were expressed as counts and percentages and compared through <italic>&#x03C7;</italic><sup>2</sup> tests. To assess and compare the segmentation performance of CNN models based on independent networks, pixel accuracy (PA), Dice similarity coefficient (DSC), mean Intersection over Union, precision, and recall were calculated. 
These metrics were computed separately for each test image, and the final performance values were obtained by averaging the image-level results across the entire dataset. To avoid inflated degrees of freedom arising from multiple images per participant, a paired-sample <italic>t</italic> test was performed at the participant level to compare the performance of FCN-101 and DeepLabV3 models. Normality of the paired differences was confirmed using the Shapiro-Wilk test. Detailed evaluation metrics were demonstrated in the Supplementary Methods in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>. The confusion matrices were adopted to show the pixel-level classification of cancerous regions and normal tissue based on aggregated raw pixel counts. Two-tailed <italic>P</italic>&#x003C;.05 was considered statistically significant.</p></sec><sec id="s2-8"><title>Ethical Considerations</title><p>This single-center prospective study involving human participants was approved by the local institutional review board of the First Affiliated Hospital of Sun Yat-sen University (Ethics [2023]842). Written informed consent was obtained from all participants prior to their inclusion in the study. This study was conducted in accordance with the Declaration of Helsinki and its subsequent amendments. All data were deidentified before analysis to ensure participant privacy. Participants did not receive any financial or material compensation, as all procedures were part of routine clinical care, and the analytical use of the resulting data was clearly explained to participants during enrollment. No identifiable images or personal information are included.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Participant Characteristics</title><p>A total of 163 individual participants received ultrasound-guided breast biopsy, and 58 participants were excluded (<xref ref-type="fig" rid="figure3">Figure 3</xref>). 
The final prospective dataset contained 105 participants (mean age 53.7 y, SD 11.3; all female) diagnosed with breast cancer with 386 HFUS images of the needle tract. All biopsies ultimately achieved successful targeting and adequate sampling in the 105 participants. Among them, 64 (87.7%), 19 (95%), and 9 (75%) invasive breast cancer and 9 (12.3%), 1 (5%), and 3 (25%) ductal carcinomas in situ were identified in the training, validation, and test subsets, respectively. Characteristics were compared between the three subsets in <xref ref-type="table" rid="table1">Table 1</xref> and no evidence of a statistical difference was observed, except for vascularity, where internal vascularity of breast lesions was more frequently observed in the training dataset (<italic>P</italic>=.03).</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Flowchart showing inclusion and exclusion criteria of participants. BI-RADS: Breast Imaging Reporting and Data System; US: ultrasound; WSI: whole slide image.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v14i1e81181_fig03.png"/></fig><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Participants and breast cancer characteristics. 
Unless otherwise indicated, data are numbers of lesions, with the percentages in parentheses.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Characteristic</td><td align="left" valign="bottom">Training set (n=73)</td><td align="left" valign="bottom">Validation set (n=20)</td><td align="left" valign="bottom">Testing set (n=12)</td><td align="left" valign="bottom"><italic>P</italic> value</td></tr></thead><tbody><tr><td align="left" valign="top">Age (y), mean (SD)</td><td align="left" valign="top">52.7 (SD 11.2)</td><td align="left" valign="top">57.8 (SD 13.2)</td><td align="left" valign="top">52.8 (SD 7.4)</td><td align="left" valign="top">.21</td></tr><tr><td align="left" valign="top" colspan="4">Lesion number, n (%)</td><td align="left" valign="top">.08</td></tr><tr><td align="left" valign="top">&#x2003;Solitary</td><td align="left" valign="top">64 (87.7)</td><td align="left" valign="top">19 (95)</td><td align="left" valign="top">8 (66.7)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">&#x2003;Numerous</td><td align="left" valign="top">9 (12.3)</td><td align="left" valign="top">1 (5)</td><td align="left" valign="top">4 (33.3)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="4">BI-RADS<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup> categorization, n (%)</td><td align="left" valign="top">.96</td></tr><tr><td align="left" valign="top">&#x2003;BI-RADS 4a</td><td align="left" valign="top">5 (6.8)</td><td align="left" valign="top">1 (5)</td><td align="left" valign="top">1 (8.3)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">&#x2003;BI-RADS 4b</td><td align="left" valign="top">9 (12.3)</td><td align="left" valign="top">3 (15)</td><td align="left" valign="top">1 (8.3)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">&#x2003;BI-RADS 4c</td><td align="left" valign="top">18 (24.7)</td><td 
align="left" valign="top">3 (15)</td><td align="left" valign="top">2 (16.7)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">&#x2003;BI-RADS 5</td><td align="left" valign="top">40 (54.8)</td><td align="left" valign="top">13 (65)</td><td align="left" valign="top">8 (66.7)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="4">Lesion location, n (%)</td><td align="left" valign="top">.68</td></tr><tr><td align="left" valign="top">&#x2003;Left breast</td><td align="left" valign="top">32 (43.8)</td><td align="left" valign="top">11 (55)</td><td align="left" valign="top">6 (50)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">&#x2003;Right breast</td><td align="left" valign="top">41 (56.2)</td><td align="left" valign="top">9 (45)</td><td align="left" valign="top">6 (50)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="4">Quadrant, n (%)</td><td align="left" valign="top">.30</td></tr><tr><td align="left" valign="top">&#x2003;Upper outer quadrant</td><td align="left" valign="top">45 (61.6)</td><td align="left" valign="top">9 (45)</td><td align="left" valign="top">5 (41.7)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">&#x2003;Lower outer quadrant</td><td align="left" valign="top">10 (13.7)</td><td align="left" valign="top">1 (5)</td><td align="left" valign="top">2 (16.7)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">&#x2003;Upper inner quadrant</td><td align="left" valign="top">12 (16.4)</td><td align="left" valign="top">6 (30)</td><td align="left" valign="top">3 (25)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">&#x2003;Lower inner quadrant</td><td align="left" valign="top">6 (8.2)</td><td align="left" valign="top">4 (20)</td><td align="left" valign="top">2 (16.7)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">Volume (mm<sup>3</sup>), mean (SD)</td><td 
align="left" valign="top">4.1 (SD 5.1)</td><td align="left" valign="top">3.4 (SD 2.8)</td><td align="left" valign="top">4.3 (SD 7.0)</td><td align="left" valign="top">.82</td></tr><tr><td align="left" valign="top" colspan="4">Orientation, n (%)</td><td align="left" valign="top">.22</td></tr><tr><td align="left" valign="top">&#x2003;Parallel</td><td align="left" valign="top">82 (89.1)</td><td align="left" valign="top">15 (75)</td><td align="left" valign="top">11 (91.7)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">&#x2003;Not parallel</td><td align="left" valign="top">10 (10.9)</td><td align="left" valign="top">5 (25)</td><td align="left" valign="top">1 (8.3)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="4">Echo pattern, n (%)</td><td align="left" valign="top">.78</td></tr><tr><td align="left" valign="top">&#x2003;Hypoechoic</td><td align="left" valign="top">86 (93.5)</td><td align="left" valign="top">20 (100)</td><td align="left" valign="top">12 (100)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">&#x2003;Heterogeneous</td><td align="left" valign="top">6 (6.5)</td><td align="left" valign="top">0 (0)</td><td align="left" valign="top">0 (0)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="4">Margin, n (%)</td><td align="left" valign="top">.52</td></tr><tr><td align="left" valign="top">&#x2003;Circumscribed</td><td align="left" valign="top">6 (8.2)</td><td align="left" valign="top">2 (10)</td><td align="left" valign="top">2 (16.7)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">&#x2003;Not circumscribed</td><td align="left" valign="top">67 (91.8)</td><td align="left" valign="top">18 (90)</td><td align="left" valign="top">10 (83.3)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="4">Shape, n (%)</td><td align="left" valign="top">.47</td></tr><tr><td align="left" 
valign="top">&#x2003;Oval/Round</td><td align="left" valign="top">3 (4.1)</td><td align="left" valign="top">0 (0.0)</td><td align="left" valign="top">1 (8.3)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">&#x2003;Irregular</td><td align="left" valign="top">70 (95.9)</td><td align="left" valign="top">20 (100.0)</td><td align="left" valign="top">11 (91.7)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="4">Calcification, n (%)</td><td align="left" valign="top">.40</td></tr><tr><td align="left" valign="top">&#x2003;Absence</td><td align="left" valign="top">36 (49.3)</td><td align="left" valign="top">25 (59.5)</td><td align="left" valign="top">8 (66.7)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">&#x2003;Calcification in a mass</td><td align="left" valign="top">37 (50.7)</td><td align="left" valign="top">17 (40.5)</td><td align="left" valign="top">4 (33.3)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="4">Vascularity, n (%)</td><td align="left" valign="top">.03</td></tr><tr><td align="left" valign="top">&#x2003;Absent</td><td align="left" valign="top">11 (15.1)</td><td align="left" valign="top">7 (35)</td><td align="left" valign="top">2 (16.7)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">&#x2003;Internal vascularity</td><td align="left" valign="top">56 (76.7)</td><td align="left" valign="top">11 (55)</td><td align="left" valign="top">7 (58.3)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">&#x2003;Vessels in rim</td><td align="left" valign="top">6 (18.2)</td><td align="left" valign="top">2 (10)</td><td align="left" valign="top">4 (33.3)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="4">Histopathologic pattern, n (%)</td><td align="left" valign="top">.13</td></tr><tr><td align="left" valign="top">&#x2003;Invasive breast cancer</td><td align="left" 
valign="top">64 (87.7)</td><td align="left" valign="top">19 (95)</td><td align="left" valign="top">9 (75)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">&#x2003;Ductal carcinoma in situ</td><td align="left" valign="top">9 (12.3)</td><td align="left" valign="top">1 (5)</td><td align="left" valign="top">3 (25)</td><td align="left" valign="top"/></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>BI-RADS: Breast Imaging Reporting and Data System.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-2"><title>Model Evaluation</title><p>The segmentation capacity of the image-based CNN model based on FCN-101 and DeepLabV3 in the test dataset is provided in <xref ref-type="table" rid="table2">Table 2</xref>. The table presents image-level descriptive statistics, while all inferential comparisons were conducted at the participant level (n=12). In the test dataset, FCN-101 showed higher accuracy (PA: 86.91% vs 69.55%, <italic>P</italic>=.002), similarity (DSC: 77.47% vs 69.90%, <italic>P</italic>&#x003C;.001), mean Intersection over Union (67.47% vs 60.29%, <italic>P</italic>&#x003C;.001), and precision (66.01% vs 56.15%, <italic>P</italic>&#x003C;.001) compared to DeepLabV3. There was no evidence of a difference in recall (54.64% vs 58.46%, <italic>P</italic>=.80) between the two algorithms.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Comparison of prediction performance of FCN-101 and DeepLabV3 models in predicting cancerous regions in breast cancer in the test dataset. All metrics reflect image-level performance and are expressed as percentages. 
Statistical comparisons (<italic>P</italic> values) were performed at the participant level using paired analyses.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Evaluation metric</td><td align="left" valign="bottom">FCN-101, % (95% CI)</td><td align="left" valign="bottom">DeepLabV3, % (95% CI)</td><td align="left" valign="bottom"><italic>P</italic> value</td></tr></thead><tbody><tr><td align="left" valign="top">PA<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup></td><td align="char" char="." valign="top">86.91 (80.30-94.77)</td><td align="char" char="." valign="top">69.55 (65.82-73.99)</td><td align="char" char="." valign="top">.002</td></tr><tr><td align="left" valign="top">DSC<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup></td><td align="char" char="." valign="top">77.47 (70.74-85.88)</td><td align="char" char="." valign="top">69.90 (63.43-75.02)</td><td align="char" char="." valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">mIoU<sup><xref ref-type="table-fn" rid="table2fn3">c</xref></sup></td><td align="char" char="." valign="top">67.47 (59.70-75.54)</td><td align="char" char="." valign="top">60.29 (54.19-66.57)</td><td align="char" char="." valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Precision</td><td align="char" char="." valign="top">66.01 (55.69-73.45)</td><td align="char" char="." valign="top">56.15 (48.12-64.17)</td><td align="char" char="." valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Recall</td><td align="char" char="." valign="top">54.64 (45.57-63.75)</td><td align="char" char="." valign="top">58.46 (49.03-65.66)</td><td align="char" char="." 
valign="top">.80</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>PA: pixel accuracy.</p></fn><fn id="table2fn2"><p><sup>b</sup>DSC: Dice similarity coefficient.</p></fn><fn id="table2fn3"><p><sup>c</sup>mIoU: mean Intersection over Union.</p></fn></table-wrap-foot></table-wrap><p>The confusion matrix presented in <xref ref-type="fig" rid="figure4">Figure 4</xref> evaluates the pixel categorization capacity, providing further insight into the per-pixel predictions of each model. The results indicated that the FCN-101 model successfully predicted the majority of cancerous pixels (5,846,319 vs 4,649,445 pixels; <italic>P</italic>&#x003C;.05). However, the DeepLabV3 model demonstrated more accurate predictive pixels in background components (2,440,670 vs 1,895,664 pixels; <italic>P</italic>&#x003C;.05). Based on the pixel-wise confusion matrices, the recall and specificity of FCN-101 were 86.37% and 43.07%, while those of DeepLabV3 reached 68.69% and 55.46%.</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Confusion matrices for predicting cancerous regions. Confusion matrices were applied to summarize pixel-level classification outcomes aggregated across the entire test dataset for (A) FCN-101 and (B) DeepLabV3, indicating discordance or concordance with the ground truth (cancerous cell/background) from WSI results. WSI: whole slide image.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v14i1e81181_fig04.png"/></fig></sec><sec id="s3-3"><title>Model Visualization</title><p>In the test dataset, the automatic segmentation results of cancerous regions based on the two networks were visually displayed and qualitatively compared with the gold standard to evaluate the predictive performance of the models. 
Cancerous regions in the CNB biopsy area of three different participants&#x2019; breast HFUS images were identified and localized in <xref ref-type="fig" rid="figure5">Figure 5</xref>, as generated by the FCN-101 and DeepLabV3 models. Cancerous region predictions by the two CNN models align closely with the actual histopathology. Additionally, even in challenging cases with unclear boundaries and mixed internal echoes, as represented in Example 2, both models were able to accurately predict the specific distribution of cancerous regions within the lesions.</p><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>Examples of identifying cancerous regions in three participants with invasive breast cancer. (<bold>A</bold>) Original HFUS images, with biopsy needle (white arrow) pointing to the needle tract area (red frame). (<bold>B</bold>) Annotation of ground truth labels by an expert pathologist. (<bold>C</bold>) Prediction results of the FCN-101 model (gray area). (<bold>D</bold>) Prediction results of the DeepLabV3 model (gray area). HFUS: high-frequency ultrasound.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v14i1e81181_fig05.png"/></fig></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Results</title><p>Breast cancer is a group of highly heterogeneous diseases with varying imaging features. Thus, differential diagnosis through imaging is limited [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref26">26</xref>]. Development and validation of a fast and noninvasive method equal to histologic results is urgently needed. Here, we developed a cancerous region classifier using a deep learning network with true labels from radiology-pathology registration. 
The FCN-101 model was superior to the DeepLabV3 model in terms of PA (86.91% vs 69.55%; <italic>P</italic>=.002) and DSC (77.47% vs 69.90%; <italic>P</italic>&#x003C;.001). Recall values were 54.64% and 58.46%, with no significant difference observed between them (<italic>P</italic>=.80). The FCN-101 model excelled in identifying cancerous regions (5,846,319 vs 4,649,445 pixels; <italic>P</italic>&#x003C;.05), whereas DeepLabV3 was more accurate for normal tissue (2,440,670 vs 1,895,664 pixels; <italic>P</italic>&#x003C;.05) in pixel-level predictions. In the clinical workflow, the model could segment cancerous regions in grayscale ultrasound images of the breast. The results highlight the model&#x2019;s potential for advancing breast cancer assessment at the microscopic level via ultrasound imaging.</p><p>Efforts have been made to establish a methodology pertaining to image-histopathology registration [<xref ref-type="bibr" rid="ref27">27</xref>-<xref ref-type="bibr" rid="ref32">32</xref>]. For example, Ward et al [<xref ref-type="bibr" rid="ref27">27</xref>] and Kwak et al [<xref ref-type="bibr" rid="ref28">28</xref>] demonstrated accurate alignment between MRIs and digital histopathologic analyses in patients with prostate cancer. In addition, Wildeboer et al [<xref ref-type="bibr" rid="ref29">29</xref>] established a multiparametric machine learning model on ultrasound for histopathologic localization of prostate cancer. However, similar research in breast imaging remains limited. In our study, WSIs from CNB provided accessible clinical data suitable for state-of-the-art deep learning algorithms.</p><p>Thus, a strength of this study is that we combined accessible CNB tissue samples with real-time ultrasound imaging to facilitate the registration process. The biopsy needle targeted the desired section plane while the tissue sample was obtained at the same level, which was crucial for registration. 
Moreover, HFUS offers spatial tissue distribution data, enhancing dynamic biopsy procedures and clinical utility. However, subtle registration errors existed; for example, the biopsy needle was deflected when encountering small breast tumors or rigid glandular tissue [<xref ref-type="bibr" rid="ref33">33</xref>]. To address this, our skilled operators actively mitigated needle deflections. Pre-fire and post-fire ultrasound images were compared to ensure that the extracted tissue precisely corresponded to the visualized ultrasound plane.</p><p>Studies have compared the biopsy efficacy of existing methods. Currently, three image-guided breast biopsy techniques are used: stereotactic-, ultrasound-, and MRI-guided biopsies. Stereotactic biopsy is indicated for calcifications, and MRI-guided biopsy is indicated for lesions visible only on MRI; both primarily assess ductal proliferative lesions. Ultrasound-guided biopsy, however, applies to a broader range of breast lesions [<xref ref-type="bibr" rid="ref34">34</xref>]. Yashima et al [<xref ref-type="bibr" rid="ref35">35</xref>] retrospectively compared the positive biopsy rate in 453 patients with 500 lesions that underwent ultrasound-guided core needle biopsy or vacuum-assisted biopsy and reported positive biopsy rates of 61.9% and 72.4% (<italic>P</italic>=.032), respectively. Unrepresentative CNB specimens might not fully reflect the overall characteristics of the tumor [<xref ref-type="bibr" rid="ref17">17</xref>]. Although multipoint sampling and repetitive biopsy increased the detection rate, they also increased complications like bleeding, infection, and tumor spreading [<xref ref-type="bibr" rid="ref36">36</xref>]. Based on this, our model may generate real-time cancer prediction heatmaps, where suspicious regions are highlighted to assist biopsy site selection during CNB sampling. 
However, prior to clinical use, the proposed approach should undergo rigorous supervision and ethical evaluation to ensure its safety and reliability in guiding biopsy decisions.</p><p>The visualized cancerous map was used to enhance the interpretability of the model. Here, we found that distribution maps facilitated the assessment of cancerous regions by highlighting the hypoechoic area in HFUS images (illustrated in <xref ref-type="fig" rid="figure5">Figure 5</xref>), which is in accordance with clinical routine. Furthermore, areas of abnormal echogenicity or edges of lesions often corresponded to cancerous regions identified in histopathology, which was also correctly predicted by the deep learning models. Guided by the ultrasound-based CNN algorithm, doctors could identify suspicious regions based on predicted cancerous regions, even in small lesions, helping to ensure sufficient and representative CNB samples for accurate histopathological assessment. However, the model is intended to assist clinicians and cannot replace their judgment in biopsy decision-making.</p><p>Notably, there is a discrepancy between the per-image recall and the pixel-level aggregated recall, which can be explained by the scale-dependent nature of these two evaluation strategies. This scale-dependent discrepancy indicates that the current model achieves higher sensitivity for larger tumors, while the detection of small lesions remains more challenging. It highlights an important direction for future optimization aimed at improving detection stability across different tumor sizes. Although the two models had advantages in predicting cancerous regions, the confusion matrix analysis revealed a relatively low specificity, particularly for the FCN-101 model (43.07%). This indicates the presence of false-positive segmentation, with a proportion of normal background pixels being incorrectly segmented as cancerous. 
Such oversegmentation may be acceptable in identifying suspicious regions when emphasizing sensitivity. However, in the context of biopsy guidance, such behavior highlights the need for further optimization toward more precise boundary discrimination. Future work will focus on improving specificity through better loss function design, boundary-aware learning, and postprocessing strategies.</p></sec><sec id="s4-2"><title>Limitations</title><p>This study combined registered WSIs and HFUS images to enhance cancerous region recognition in breast cancer, which has not been well-established in the literature. Still, we acknowledge the limitations of this study. First, it is a single-center prospective study with a small and potentially homogenous dataset, which undermines the model&#x2019;s generalizability and heightens the risk of overfitting. Besides, the small sample size in the test set limits statistical power and the generalizability of the prospective findings. To address this, we plan to incorporate data from multiple centers for robust external validation and conduct prospective studies to explore the models&#x2019; role in assisting breast biopsy and postoperative follow-up after neoadjuvant therapy. Second, the relatively low recall indicates that some malignant regions may be missed; future studies will focus on improving model sensitivity through ensemble and data balancing approaches. Besides, benchmarking against widely adopted baseline models (eg, U-Net) could be performed in future research. Third, there is an absence of molecular subtype or pathological classification analysis and an imbalance of specific subtypes with small sample sizes (ductal carcinoma in situ in the test set). Given that different subtypes and cancer types exhibit distinct morphological features, a subtype-specific analysis could reveal performance differences and lead to refined models optimized for specific subtypes. 
Fourth, other imaging modalities such as color Doppler flow imaging, elastography, and contrast-enhanced ultrasound also play paramount roles in breast cancer diagnosis. For example, the lower prevalence of internal vascularity in the test set compared with the training set may have influenced model performance. Combining the information from multiple modalities could potentially further improve the performance of the CNN model. Fifth, the ultrasound-histopathology registration was based on biopsy WSIs and ultrasound images of the needle tract. Spatial correspondence should be regarded as approximate, and pixel-level metrics may overestimate the physical precision, which necessitates further refinement and validation via the whole-mount specimen.</p></sec><sec id="s4-3"><title>Conclusion</title><p>In conclusion, we have proposed and evaluated deep learning models to identify cancerous regions in breast cancer in HFUS images through spatial registration of breast biopsy WSIs and HFUS images. This technique is potentially useful in conventional ultrasound examinations and ultrasound-guided breast biopsy procedures.</p></sec></sec></body><back><ack><p>During drafting and revision of the manuscript, we used ChatGPT 5.0 (OpenAI) to polish the work to refine the language and improve readability. The authors reviewed and edited the content to ensure accuracy and take full responsibility for the content of the publication.</p></ack><notes><sec><title>Funding</title><p>This study was supported by the National Scientific Foundation Committee of China (grants 82071951, 82102057, 82402297) and the Major Research Plan of the National Natural Science Foundation of China (grant 92059201). The funders had no role in the study design, data collection and analysis, decision to publish, or preparation of the manuscript.</p></sec><sec><title>Data Availability</title><p>All data supporting the findings are available from the corresponding author on reasonable request. 
Source code for our convolutional neural network models is deposited on GitHub [<xref ref-type="bibr" rid="ref37">37</xref>].</p></sec></notes><fn-group><fn fn-type="con"><p>JQY contributed to conceptualization, formal analysis, investigation, methodology, and writing &#x2013; original draft. WWZ contributed to data curation, formal analysis, investigation, methodology, and writing &#x2013; original draft. ZFC contributed to formal analysis, software, and validation. FR contributed to software, methodology, and supervision. TYH contributed to data curation, investigation, and methodology. TTZ contributed to data curation and investigation. HJS contributed to data curation and supervision. XYX contributed to conceptualization, funding acquisition, and supervision. ZZ contributed to conceptualization, methodology, software, supervision, and visualization. MX contributed to conceptualization, funding acquisition, project administration, and writing &#x2013; review and editing. JQY and WWZ are cofirst authors. 
ZZ and MX are cocorresponding authors.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">BI-RADS</term><def><p>Breast Imaging Reporting and Data System</p></def></def-item><def-item><term id="abb2">CNB</term><def><p>core needle biopsy</p></def></def-item><def-item><term id="abb3">CNN</term><def><p>convolutional neural network</p></def></def-item><def-item><term id="abb4">DSC</term><def><p>Dice similarity coefficient</p></def></def-item><def-item><term id="abb5">H&#x0026;E</term><def><p>hematoxylin and eosin</p></def></def-item><def-item><term id="abb6">HFUS</term><def><p>high-frequency ultrasound</p></def></def-item><def-item><term id="abb7">MRI</term><def><p>magnetic resonance image/imaging</p></def></def-item><def-item><term id="abb8">PA</term><def><p>pixel accuracy</p></def></def-item><def-item><term id="abb9">ROI</term><def><p>region of interest</p></def></def-item><def-item><term id="abb10">WSI</term><def><p>whole slide image</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zardavas</surname><given-names>D</given-names> </name><name name-style="western"><surname>Irrthum</surname><given-names>A</given-names> </name><name name-style="western"><surname>Swanton</surname><given-names>C</given-names> </name><name name-style="western"><surname>Piccart</surname><given-names>M</given-names> </name></person-group><article-title>Clinical management of breast cancer heterogeneity</article-title><source>Nat Rev Clin Oncol</source><year>2015</year><month>07</month><volume>12</volume><issue>7</issue><fpage>381</fpage><lpage>394</lpage><pub-id pub-id-type="doi">10.1038/nrclinonc.2015.73</pub-id><pub-id pub-id-type="medline">25895611</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>L&#x00FC;&#x00F6;nd</surname><given-names>F</given-names> </name><name name-style="western"><surname>Tiede</surname><given-names>S</given-names> </name><name name-style="western"><surname>Christofori</surname><given-names>G</given-names> </name></person-group><article-title>Breast cancer as an example of tumour heterogeneity and tumour cell plasticity during malignant progression</article-title><source>Br J Cancer</source><year>2021</year><month>07</month><volume>125</volume><issue>2</issue><fpage>164</fpage><lpage>175</lpage><pub-id pub-id-type="doi">10.1038/s41416-021-01328-7</pub-id><pub-id pub-id-type="medline">33824479</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Berg</surname><given-names>WA</given-names> </name><name name-style="western"><surname>Bandos</surname><given-names>AI</given-names> </name><name name-style="western"><surname>Mendelson</surname><given-names>EB</given-names> </name><name name-style="western"><surname>Lehrer</surname><given-names>D</given-names> </name><name name-style="western"><surname>Jong</surname><given-names>RA</given-names> </name><name name-style="western"><surname>Pisano</surname><given-names>ED</given-names> </name></person-group><article-title>Ultrasound as the primary screening test for breast cancer: analysis from ACRIN 6666</article-title><source>J Natl Cancer Inst</source><year>2016</year><month>04</month><volume>108</volume><issue>4</issue><fpage>djv367</fpage><pub-id pub-id-type="doi">10.1093/jnci/djv367</pub-id><pub-id pub-id-type="medline">26712110</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kutasovic</surname><given-names>JR</given-names> </name><name 
name-style="western"><surname>McCart Reed</surname><given-names>AE</given-names> </name><name name-style="western"><surname>Sokolova</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lakhani</surname><given-names>SR</given-names> </name><name name-style="western"><surname>Simpson</surname><given-names>PT</given-names> </name></person-group><article-title>Morphologic and genomic heterogeneity in the evolution and progression of breast cancer</article-title><source>Cancers (Basel)</source><year>2020</year><month>03</month><day>31</day><volume>12</volume><issue>4</issue><fpage>848</fpage><pub-id pub-id-type="doi">10.3390/cancers12040848</pub-id><pub-id pub-id-type="medline">32244556</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Haque</surname><given-names>W</given-names> </name><name name-style="western"><surname>Verma</surname><given-names>V</given-names> </name><name name-style="western"><surname>Hatch</surname><given-names>S</given-names> </name><name name-style="western"><surname>Suzanne Klimberg</surname><given-names>V</given-names> </name><name name-style="western"><surname>Brian Butler</surname><given-names>E</given-names> </name><name name-style="western"><surname>Teh</surname><given-names>BS</given-names> </name></person-group><article-title>Response rates and pathologic complete response by breast cancer molecular subtype following neoadjuvant chemotherapy</article-title><source>Breast Cancer Res Treat</source><year>2018</year><month>08</month><volume>170</volume><issue>3</issue><fpage>559</fpage><lpage>567</lpage><pub-id pub-id-type="doi">10.1007/s10549-018-4801-3</pub-id><pub-id pub-id-type="medline">29693228</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Fayanju</surname><given-names>OM</given-names> </name><name name-style="western"><surname>Ren</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Thomas</surname><given-names>SM</given-names> </name><etal/></person-group><article-title>The clinical significance of breast-only and node-only pathologic complete response (pCR) after neoadjuvant chemotherapy (NACT): a review of 20,000 breast cancer patients in the National Cancer Data Base (NCDB)</article-title><source>Ann Surg</source><year>2018</year><month>10</month><volume>268</volume><issue>4</issue><fpage>591</fpage><lpage>601</lpage><pub-id pub-id-type="doi">10.1097/SLA.0000000000002953</pub-id><pub-id pub-id-type="medline">30048319</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Romeo</surname><given-names>V</given-names> </name><name name-style="western"><surname>Accardo</surname><given-names>G</given-names> </name><name name-style="western"><surname>Perillo</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Assessment and prediction of response to neoadjuvant chemotherapy in breast cancer: a comparison of imaging modalities and future perspectives</article-title><source>Cancers (Basel)</source><year>2021</year><month>07</month><day>14</day><volume>13</volume><issue>14</issue><fpage>3521</fpage><pub-id pub-id-type="doi">10.3390/cancers13143521</pub-id><pub-id pub-id-type="medline">34298733</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fowler</surname><given-names>AM</given-names> </name><name name-style="western"><surname>Mankoff</surname><given-names>DA</given-names> </name><name name-style="western"><surname>Joe</surname><given-names>BN</given-names> 
</name></person-group><article-title>Imaging neoadjuvant therapy response in breast cancer</article-title><source>Radiology</source><year>2017</year><month>11</month><volume>285</volume><issue>2</issue><fpage>358</fpage><lpage>375</lpage><pub-id pub-id-type="doi">10.1148/radiol.2017170180</pub-id><pub-id pub-id-type="medline">29045232</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kuerer</surname><given-names>HM</given-names> </name><name name-style="western"><surname>Smith</surname><given-names>BD</given-names> </name><name name-style="western"><surname>Krishnamurthy</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Eliminating breast surgery for invasive breast cancer in exceptional responders to neoadjuvant systemic therapy: a multicentre, single-arm, phase 2 trial</article-title><source>Lancet Oncol</source><year>2022</year><month>12</month><volume>23</volume><issue>12</issue><fpage>1517</fpage><lpage>1524</lpage><pub-id pub-id-type="doi">10.1016/S1470-2045(22)00613-1</pub-id><pub-id pub-id-type="medline">36306810</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shi</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Huang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Cheng</surname><given-names>Z</given-names> </name><etal/></person-group><article-title>MRI-based quantification of intratumoral heterogeneity for predicting treatment response to neoadjuvant chemotherapy in breast cancer</article-title><source>Radiology</source><year>2023</year><month>07</month><volume>308</volume><issue>1</issue><fpage>e222830</fpage><pub-id pub-id-type="doi">10.1148/radiol.222830</pub-id><pub-id 
pub-id-type="medline">37432083</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fatayer</surname><given-names>H</given-names> </name><name name-style="western"><surname>Sharma</surname><given-names>N</given-names> </name><name name-style="western"><surname>Manuel</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Serial MRI scans help in assessing early response to neoadjuvant chemotherapy and tailoring breast cancer treatment</article-title><source>Eur J Surg Oncol</source><year>2016</year><month>07</month><volume>42</volume><issue>7</issue><fpage>965</fpage><lpage>972</lpage><pub-id pub-id-type="doi">10.1016/j.ejso.2016.03.019</pub-id><pub-id pub-id-type="medline">27260848</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ali</surname><given-names>HR</given-names> </name><name name-style="western"><surname>Jackson</surname><given-names>HW</given-names> </name><name name-style="western"><surname>Zanotelli</surname><given-names>VRT</given-names> </name><etal/></person-group><article-title>Imaging mass cytometry and multiplatform genomics define the phenogenomic landscape of breast cancer</article-title><source>Nat Cancer</source><year>2020</year><month>02</month><volume>1</volume><issue>2</issue><fpage>163</fpage><lpage>175</lpage><pub-id pub-id-type="doi">10.1038/s43018-020-0026-6</pub-id><pub-id pub-id-type="medline">35122013</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shamai</surname><given-names>G</given-names> </name><name name-style="western"><surname>Binenbaum</surname><given-names>Y</given-names> </name><name 
name-style="western"><surname>Slossberg</surname><given-names>R</given-names> </name><name name-style="western"><surname>Duek</surname><given-names>I</given-names> </name><name name-style="western"><surname>Gil</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Kimmel</surname><given-names>R</given-names> </name></person-group><article-title>Artificial intelligence algorithms to assess hormonal status from tissue microarrays in patients with breast cancer</article-title><source>JAMA Netw Open</source><year>2019</year><month>07</month><day>3</day><volume>2</volume><issue>7</issue><fpage>e197700</fpage><pub-id pub-id-type="doi">10.1001/jamanetworkopen.2019.7700</pub-id><pub-id pub-id-type="medline">31348505</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>H</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>WD</given-names> </name><name name-style="western"><surname>Shang</surname><given-names>ZH</given-names> </name><etal/></person-group><article-title>Breast cancer molecular subtype prediction on pathological images with discriminative patch selection and multi-instance learning</article-title><source>Front Oncol</source><year>2022</year><volume>12</volume><fpage>858453</fpage><pub-id pub-id-type="doi">10.3389/fonc.2022.858453</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Park</surname><given-names>KH</given-names> </name><name name-style="western"><surname>Loibl</surname><given-names>S</given-names> </name><name name-style="western"><surname>Sohn</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Pan-Asian adapted ESMO Clinical Practice Guidelines for the diagnosis, treatment and follow-up of 
patients with early breast cancer</article-title><source>ESMO Open</source><year>2024</year><month>05</month><volume>9</volume><issue>5</issue><fpage>102974</fpage><pub-id pub-id-type="doi">10.1016/j.esmoop.2024.102974</pub-id><pub-id pub-id-type="medline">38796284</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tasoulis</surname><given-names>MK</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>HB</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>W</given-names> </name><etal/></person-group><article-title>Accuracy of post-neoadjuvant chemotherapy image-guided breast biopsy to predict residual cancer</article-title><source>JAMA Surg</source><year>2020</year><month>12</month><day>1</day><volume>155</volume><issue>12</issue><fpage>e204103</fpage><pub-id pub-id-type="doi">10.1001/jamasurg.2020.4103</pub-id><pub-id pub-id-type="medline">33026457</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Brogi</surname><given-names>E</given-names> </name><name name-style="western"><surname>Krystel-Whittemore</surname><given-names>M</given-names> </name></person-group><article-title>Papillary neoplasms of the breast including upgrade rates and management of intraductal papilloma without atypia diagnosed at core needle biopsy</article-title><source>Mod Pathol</source><year>2021</year><month>01</month><volume>34</volume><issue>Suppl 1</issue><fpage>78</fpage><lpage>93</lpage><pub-id pub-id-type="doi">10.1038/s41379-020-00706-5</pub-id><pub-id pub-id-type="medline">33106592</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Huang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Yao</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Li</surname><given-names>L</given-names> </name><etal/></person-group><article-title>Deep learning radiopathomics based on preoperative US images and biopsy whole slide images can distinguish between luminal and non-luminal tumors in early-stage breast cancers</article-title><source>EBioMedicine</source><year>2023</year><month>08</month><volume>94</volume><fpage>104706</fpage><pub-id pub-id-type="doi">10.1016/j.ebiom.2023.104706</pub-id><pub-id pub-id-type="medline">37478528</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bhattacharya</surname><given-names>I</given-names> </name><name name-style="western"><surname>Seetharaman</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kunder</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Selective identification and localization of indolent and aggressive prostate cancers via CorrSigNIA: an MRI-pathology correlation and deep learning framework</article-title><source>Med Image Anal</source><year>2022</year><month>01</month><volume>75</volume><fpage>102288</fpage><pub-id pub-id-type="doi">10.1016/j.media.2021.102288</pub-id><pub-id pub-id-type="medline">34784540</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="report"><person-group person-group-type="author"><name name-style="western"><surname>Morris</surname><given-names>E</given-names> </name><name name-style="western"><surname>Comstock</surname><given-names>C</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>C</given-names> </name><etal/></person-group><article-title>ACR BI-RADS magnetic resonance imaging ACR 
BI-RADS atlas, breast imaging reporting and data system</article-title><year>2013</year><access-date>2026-01-08</access-date><volume>5</volume><comment><ext-link ext-link-type="uri" xlink:href="https://www.acr.org/Clinical-Resources/Clinical-Tools-and-Reference/Reporting-and-Data-Systems/BI-RADS">https://www.acr.org/Clinical-Resources/Clinical-Tools-and-Reference/Reporting-and-Data-Systems/BI-RADS</ext-link></comment></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="web"><source>QuPath</source><access-date>2026-01-21</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://qupath.github.io/">https://qupath.github.io/</ext-link></comment></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dung</surname><given-names>CV</given-names> </name><name name-style="western"><surname>Anh</surname><given-names>LD</given-names> </name></person-group><article-title>Autonomous concrete crack detection using deep fully convolutional neural network</article-title><source>Automation in Construction</source><year>2019</year><month>03</month><volume>99</volume><fpage>52</fpage><lpage>58</lpage><pub-id pub-id-type="doi">10.1016/j.autcon.2018.11.028</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>LC</given-names> </name><name name-style="western"><surname>Papandreou</surname><given-names>G</given-names> </name><name name-style="western"><surname>Schroff</surname><given-names>F</given-names> </name><etal/></person-group><article-title>Rethinking atrous convolution for semantic image segmentation</article-title><source>arXiv</source><comment>Preprint posted online on Jun 17, 2017</comment><pub-id 
pub-id-type="doi">10.48550/arXiv.1706.05587</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Chai</surname><given-names>ZF</given-names> </name></person-group><article-title>Breast cancer component segmentation</article-title><source>GitHub repository</source><year>2024</year><access-date>2024-07-23</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://github.com/ChaiZhifeiF/Breast-Cancer-Component-Segmentation">https://github.com/ChaiZhifeiF/Breast-Cancer-Component-Segmentation</ext-link></comment></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>HJ</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>EK</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>MJ</given-names> </name><etal/></person-group><article-title>Observer variability of Breast Imaging Reporting and Data System (BI-RADS) for breast ultrasound</article-title><source>Eur J Radiol</source><year>2008</year><month>02</month><volume>65</volume><issue>2</issue><fpage>293</fpage><lpage>298</lpage><pub-id pub-id-type="doi">10.1016/j.ejrad.2007.04.008</pub-id><pub-id pub-id-type="medline">17531417</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Abdullah</surname><given-names>N</given-names> </name><name name-style="western"><surname>Mesurolle</surname><given-names>B</given-names> </name><name name-style="western"><surname>El-Khoury</surname><given-names>M</given-names> </name><name name-style="western"><surname>Kao</surname><given-names>E</given-names> </name></person-group><article-title>Breast imaging reporting and data system 
lexicon for US: interobserver agreement for assessment of breast masses</article-title><source>Radiology</source><year>2009</year><month>09</month><volume>252</volume><issue>3</issue><fpage>665</fpage><lpage>672</lpage><pub-id pub-id-type="doi">10.1148/radiol.2523080670</pub-id><pub-id pub-id-type="medline">19567644</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ward</surname><given-names>AD</given-names> </name><name name-style="western"><surname>Crukley</surname><given-names>C</given-names> </name><name name-style="western"><surname>McKenzie</surname><given-names>CA</given-names> </name><etal/></person-group><article-title>Prostate: registration of digital histopathologic images to in vivo MR images acquired by using endorectal receive coil</article-title><source>Radiology</source><year>2012</year><month>06</month><volume>263</volume><issue>3</issue><fpage>856</fpage><lpage>864</lpage><pub-id pub-id-type="doi">10.1148/radiol.12102294</pub-id><pub-id pub-id-type="medline">22474671</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kwak</surname><given-names>JT</given-names> </name><name name-style="western"><surname>Sankineni</surname><given-names>S</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Prostate cancer: a correlative study of multiparametric MR imaging and digital histopathology</article-title><source>Radiology</source><year>2017</year><month>10</month><volume>285</volume><issue>1</issue><fpage>147</fpage><lpage>156</lpage><pub-id pub-id-type="doi">10.1148/radiol.2017160906</pub-id><pub-id pub-id-type="medline">28582632</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wildeboer</surname><given-names>RR</given-names> </name><name name-style="western"><surname>Mannaerts</surname><given-names>CK</given-names> </name><name name-style="western"><surname>van Sloun</surname><given-names>RJG</given-names> </name><etal/></person-group><article-title>Automated multiparametric localization of prostate cancer based on B-mode, shear-wave elastography, and contrast-enhanced ultrasound radiomics</article-title><source>Eur Radiol</source><year>2020</year><month>02</month><volume>30</volume><issue>2</issue><fpage>806</fpage><lpage>815</lpage><pub-id pub-id-type="doi">10.1007/s00330-019-06436-w</pub-id><pub-id pub-id-type="medline">31602512</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>HH</given-names> </name><name name-style="western"><surname>Priester</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Prostate microstructure in prostate cancer using 3-T MRI with diffusion-relaxation correlation spectrum imaging: validation with whole-mount digital histopathology</article-title><source>Radiology</source><year>2020</year><month>08</month><volume>296</volume><issue>2</issue><fpage>348</fpage><lpage>355</lpage><pub-id pub-id-type="doi">10.1148/radiol.2020192330</pub-id><pub-id pub-id-type="medline">32515678</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Schoop</surname><given-names>RAL</given-names> </name><name name-style="western"><surname>de Roode</surname><given-names>LM</given-names> </name><name name-style="western"><surname>de 
Boer</surname><given-names>LL</given-names> </name><name name-style="western"><surname>Dashtbozorg</surname><given-names>B</given-names> </name></person-group><article-title>Framework for deep learning based multi-modality image registration of snapshot and pathology images</article-title><source>IEEE J Biomed Health Inform</source><year>2024</year><month>11</month><volume>28</volume><issue>11</issue><fpage>6699</fpage><lpage>6711</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2024.3444908</pub-id><pub-id pub-id-type="medline">39150810</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Schalk</surname><given-names>SG</given-names> </name><name name-style="western"><surname>Postema</surname><given-names>A</given-names> </name><name name-style="western"><surname>Saidov</surname><given-names>TA</given-names> </name><etal/></person-group><article-title>3D surface-based registration of ultrasound and histology in prostate cancer imaging</article-title><source>Comput Med Imaging Graph</source><year>2016</year><month>01</month><volume>47</volume><fpage>29</fpage><lpage>39</lpage><pub-id pub-id-type="doi">10.1016/j.compmedimag.2015.11.001</pub-id><pub-id pub-id-type="medline">26647110</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ma</surname><given-names>K</given-names> </name><name name-style="western"><surname>Kornecki</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bax</surname><given-names>J</given-names> </name><name name-style="western"><surname>Mundt</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Fenster</surname><given-names>A</given-names> </name></person-group><article-title>Development and validation of a new guidance device for lateral approach 
stereotactic breast biopsy</article-title><source>Med Phys</source><year>2009</year><month>06</month><volume>36</volume><issue>6</issue><fpage>2118</fpage><lpage>2129</lpage><pub-id pub-id-type="doi">10.1118/1.3130017</pub-id><pub-id pub-id-type="medline">19610301</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nakano</surname><given-names>S</given-names> </name><name name-style="western"><surname>Imawari</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Mibu</surname><given-names>A</given-names> </name><name name-style="western"><surname>Otsuka</surname><given-names>M</given-names> </name><name name-style="western"><surname>Oinuma</surname><given-names>T</given-names> </name></person-group><article-title>Differentiating vacuum-assisted breast biopsy from core needle biopsy: is it necessary?</article-title><source>Br J Radiol</source><year>2018</year><month>12</month><volume>91</volume><issue>1092</issue><fpage>20180250</fpage><pub-id pub-id-type="doi">10.1259/bjr.20180250</pub-id><pub-id pub-id-type="medline">29975150</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yashima</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Fujioka</surname><given-names>T</given-names> </name><name name-style="western"><surname>Kubota</surname><given-names>K</given-names> </name><etal/></person-group><article-title>Comparison of the clinical and pathological characteristics of ultrasound-guided biopsy for breast masses and non-mass lesions between 16-gauge spring-loaded core needle biopsy and 12-gauge spring-loaded vacuum-assisted biopsy</article-title><source>J Med 
Ultrasonics</source><year>2023</year><month>04</month><volume>50</volume><issue>2</issue><fpage>205</fpage><lpage>212</lpage><pub-id pub-id-type="doi">10.1007/s10396-022-01279-3</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Loving</surname><given-names>VA</given-names> </name><name name-style="western"><surname>Johnston</surname><given-names>BS</given-names> </name><name name-style="western"><surname>Reddy</surname><given-names>DH</given-names> </name><etal/></person-group><article-title>Antithrombotic therapy and hematoma risk during image-guided core-needle breast biopsy</article-title><source>Radiology</source><year>2023</year><month>01</month><volume>306</volume><issue>1</issue><fpage>79</fpage><lpage>86</lpage><pub-id pub-id-type="doi">10.1148/radiol.220548</pub-id><pub-id pub-id-type="medline">35997610</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="web"><article-title>ChaiZhifeiF/breast-cancer-component-segmentation</article-title><source>GitHub</source><access-date>2026-01-21</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://github.com/ChaiZhifeiF/Breast-Cancer-Component-Segmentation">https://github.com/ChaiZhifeiF/Breast-Cancer-Component-Segmentation</ext-link></comment></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Supplementary figures and notes illustrating the modeling framework and statistical evaluation methodologies used in this study.</p><media xlink:href="medinform_v14i1e81181_app1.docx" xlink:title="DOCX File, 3635 KB"/></supplementary-material></app-group></back></article>