<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMI</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Med Inform</journal-id>
      <journal-title>JMIR Medical Informatics</journal-title>
      <issn pub-type="epub">2291-9694</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v9i3e23328</article-id>
      <article-id pub-id-type="pmid">33609339</article-id>
      <article-id pub-id-type="doi">10.2196/23328</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Realistic High-Resolution Body Computed Tomography Image Synthesis by Using Progressive Growing Generative Adversarial Network: Visual Turing Test</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Lovis</surname>
            <given-names>Christian</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Lee</surname>
            <given-names>Hao-Chih</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Arabnia</surname>
            <given-names>Hamid</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Park</surname>
            <given-names>Ho Young</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-2318-9806</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Bae</surname>
            <given-names>Hyun-Jin</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-5134-5517</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Hong</surname>
            <given-names>Gil-Sun</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Department of Radiology and Research Institute of Radiology, University of Ulsan College of Medicine &#38; Asan Medical Center</institution>
            <addr-line>88 Olympic-ro 43-gil, Songpa-gu</addr-line>
            <addr-line>Seoul, 05505</addr-line>
            <country>Republic of Korea</country>
            <phone>82 2 3010 1548</phone>
            <email>hgs2013@gmail.com</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0068-9413</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Kim</surname>
            <given-names>Minjee</given-names>
          </name>
          <degrees>BSc</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0035-4437</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Yun</surname>
            <given-names>JiHye</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-5233-6687</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Park</surname>
            <given-names>Sungwon</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-7032-8496</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author">
          <name name-style="western">
            <surname>Chung</surname>
            <given-names>Won Jung</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-3323-2508</ext-link>
        </contrib>
        <contrib id="contrib8" contrib-type="author">
          <name name-style="western">
            <surname>Kim</surname>
            <given-names>NamKug</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff5" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-3438-2217</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Radiology and Research Institute of Radiology, University of Ulsan College of Medicine &#38; Asan Medical Center</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Medicine, University of Ulsan College of Medicine &#38; Asan Medical Center</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Department of Biomedical Engineering, Asan Medical Institute of Convergence Science and Technology, Asan Medical Center, University of Ulsan College of Medicine</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Department of Health Screening and Promotion Center, University of Ulsan College of Medicine &#38; Asan Medical Center</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff5">
        <label>5</label>
        <institution>Department of Convergence Medicine, University of Ulsan College of Medicine &#38; Asan Medical Center</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Gil-Sun Hong <email>hgs2013@gmail.com</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>3</month>
        <year>2021</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>17</day>
        <month>3</month>
        <year>2021</year>
      </pub-date>
      <volume>9</volume>
      <issue>3</issue>
      <elocation-id>e23328</elocation-id>
      <history>
        <date date-type="received">
          <day>10</day>
          <month>8</month>
          <year>2020</year>
        </date>
        <date date-type="rev-request">
          <day>21</day>
          <month>9</month>
          <year>2020</year>
        </date>
        <date date-type="rev-recd">
          <day>15</day>
          <month>11</month>
          <year>2020</year>
        </date>
        <date date-type="accepted">
          <day>20</day>
          <month>2</month>
          <year>2021</year>
        </date>
      </history>
      <copyright-statement>©Ho Young Park, Hyun-Jin Bae, Gil-Sun Hong, Minjee Kim, JiHye Yun, Sungwon Park, Won Jung Chung, NamKug Kim. Originally published in JMIR Medical Informatics (http://medinform.jmir.org), 17.03.2021.</copyright-statement>
      <copyright-year>2021</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Informatics, is properly cited. The complete bibliographic information, a link to the original publication on http://medinform.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://medinform.jmir.org/2021/3/e23328" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Generative adversarial network (GAN)–based synthetic images can be viable solutions to current supervised deep learning challenges. However, generating highly realistic images is a prerequisite for these approaches.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>The aim of this study was to investigate and validate the unsupervised synthesis of highly realistic body computed tomography (CT) images by using a progressive growing GAN (PGGAN) trained to learn the probability distribution of normal data.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We trained the PGGAN by using 11,755 body CT scans. Ten radiologists (4 radiologists with &#60;5 years of experience [Group I], 4 radiologists with 5-10 years of experience [Group II], and 2 radiologists with &#62;10 years of experience [Group III]) evaluated the results in a binary approach by using an independent validation set of 300 images (150 real and 150 synthetic) to judge the authenticity of each image.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The mean accuracy of the 10 readers in the entire image set was higher than random guessing (1781/3000, 59.4% vs 1500/3000, 50.0%, respectively; <italic>P</italic>&#60;.001). However, in terms of identifying synthetic images as fake, there was no significant difference in the specificity between the visual Turing test and random guessing (779/1500, 51.9% vs 750/1500, 50.0%, respectively; <italic>P</italic>=.29). The accuracy between the 3 reader groups with different experience levels was not significantly different (Group I, 696/1200, 58.0%; Group II, 726/1200, 60.5%; and Group III, 359/600, 59.8%; <italic>P</italic>=.36). Interreader agreements were poor (κ=0.11) for the entire image set. In subgroup analysis, the discrepancies between real and synthetic CT images occurred mainly in the thoracoabdominal junction and in the anatomical details.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>The GAN can synthesize highly realistic high-resolution body CT images that are indistinguishable from real images; however, it has limitations in generating body images of the thoracoabdominal junction and lacks accuracy in the anatomical details.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>generative adversarial network</kwd>
        <kwd>unsupervised deep learning</kwd>
        <kwd>computed tomography</kwd>
        <kwd>synthetic body images</kwd>
        <kwd>visual Turing test</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>The generative adversarial network (GAN) is a recent innovative technology that generates artificial but realistic-looking images. Despite the negative views regarding the use of synthetic images in the medical field, GANs have been spotlighted in radiological research because of their undeniable advantages [<xref ref-type="bibr" rid="ref1">1</xref>]. The use of diagnostic radiological images in the public domain always raises the problem of protecting patients’ privacy [<xref ref-type="bibr" rid="ref2">2</xref>-<xref ref-type="bibr" rid="ref5">5</xref>]. This has been a great challenge to researchers in the field of deep learning. GANs may provide a solution to these privacy concerns. Moreover, GANs are powerful nonsupervised training methods. The traditional supervised learning methods have been challenged by a lack of high-quality training data labelled by experts. Building these data requires considerable time input from experts and leads to correspondingly high costs [<xref ref-type="bibr" rid="ref6">6</xref>]. This problem has not yet been resolved despite several collaborative efforts to build large open access data sets [<xref ref-type="bibr" rid="ref7">7</xref>]. Most radiological tasks using GANs include the generation of synthetic images for augmenting training images [<xref ref-type="bibr" rid="ref8">8</xref>-<xref ref-type="bibr" rid="ref11">11</xref>], translation between different radiological modalities [<xref ref-type="bibr" rid="ref12">12</xref>-<xref ref-type="bibr" rid="ref16">16</xref>], image reconstruction and denoising [<xref ref-type="bibr" rid="ref17">17</xref>-<xref ref-type="bibr" rid="ref20">20</xref>], and data segmentation [<xref ref-type="bibr" rid="ref21">21</xref>-<xref ref-type="bibr" rid="ref24">24</xref>].</p>
      <p>The more recent noteworthy task using GANs is anomaly detection. Unlike other tasks using GANs, detecting abnormalities is based on learning the probability distribution of normal training data. Image data outside this distribution are considered as abnormal. Schlegl et al [<xref ref-type="bibr" rid="ref25">25</xref>] demonstrated GAN-based anomaly detection in optical coherence tomography images. They trained GAN with normal data in an unsupervised approach and proposed an anomaly scoring scheme. Alex et al [<xref ref-type="bibr" rid="ref26">26</xref>] showed that GAN can detect brain lesions on magnetic resonance images. This approach has attracted many radiologists for several reasons; the most critical is that this approach can achieve a broader clinical application than the current supervised deep learning–based diagnostic models. In daily clinical practice, diagnostic images are clinically acquired for patients with a variety of diseases. Therefore, before applying the supervised deep learning model, it is necessary to select suspected disease cases with disease categories similar to those of a training data set. For example, in the emergency department, a deep learning model trained by data from patients with acute appendicitis could hardly be applied to patients with different abdominal pathologies.</p>
      <p>For this approach, we think that generating highly realistic images is a prerequisite. Previous studies [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref26">26</xref>] trained a GAN model with small patches (64×64 pixels), which are randomly extracted from original images. The trained model could only generate small patches and did not learn the semantics of the whole images. Hence, the GAN model may generate artificial features, which can lead to large errors in anomaly detection tasks. In addition, there are various kinds of small and subtle lesions in the actual clinical setting. Therefore, the previous low-resolution GAN approaches could not be used for this application. In this study, we trained GAN with whole-body computed tomography (CT) images (512×512 pixels); therefore, the model learned the semantics of the images. This may lead to robust performances in anomaly detection in CT images. Due to the aforementioned reasons, we have attempted to build large data sets of normal medical images to develop GAN-based diagnostic models for clinical application. As a preliminary study, we investigated and validated the unsupervised synthesis of highly realistic body CT images by using GAN by learning the probability distribution of normal training data.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Ethical Approval</title>
        <p>This retrospective study was conducted according to the principles of the Declaration of Helsinki and was performed in accordance with current scientific guidelines. This study protocol was approved by the Institutional Review Board Committee of the Asan Medical Center (No. 2019-0486). The requirement for informed patient consent was waived.</p>
      </sec>
      <sec>
        <title>Data Collection for Training</title>
        <p>We retrospectively reviewed electronic medical records of patients who underwent chest CT or abdominopelvic CT (AP-CT) in the Health Screening and Promotion Center of Asan Medical Center between January 2013 and December 2017. We identified 139,390 patients. Their radiologic reports were then reviewed using the radiologic diagnostic codes “Code 0” or “Code B0,” which indicated normal CT in our institution’s disease classification system, and 17,854 patients with normal chest CT or normal AP-CT were identified. One board-certified radiologist (GSH) reviewed the radiological reports of the 17,854 patients and excluded 3650 cases with incidental benign lesions (eg, hepatic cysts, renal cysts, thyroid nodules) detected on body CT images. Benign lesions were defined as positive incidental findings on CT images, which did not require medical or surgical intervention. Our final study group included CT images showing anatomical variations (eg, right aortic arch, double inferior vena cava) and senile changes (eg, atherosclerotic calcification without clinical significance). Of the potentially suitable 14,204 cases, 2449 CT data sets were not available for automatic download using the in-house system of our institution. Finally, this study included 11,755 body CT scans (473,833 axial slices) for training the GAN, comprising 5000 contrast-enhanced chest CT scans (172,249 axial slices) and 6755 AP-CT scans (301,584 axial slices, comprising 132,880 slices of contrast-enhanced AP-CT and 168,704 slices of contrast-enhanced low-dose AP-CT images).</p>
      </sec>
      <sec>
        <title>Training PGGAN to Generate Body CT Images</title>
        <p>A progressive growing GAN (PGGAN) was used to generate high-resolution (512×512 pixels) synthetic body CT images. Unlike PGGAN, previous GAN models such as deep convolutional GANs were able to generate relatively low-resolution (256×256 pixels) synthetic images [<xref ref-type="bibr" rid="ref27">27</xref>]. However, PGGANs have demonstrated that high-resolution images (1024×1024 pixels) can be generated by applying progressive growing techniques [<xref ref-type="bibr" rid="ref28">28</xref>]. Because CT images are acquired in high resolutions (512×512 pixels), PGGAN could be the GAN model that can train with whole CT images in full resolution. Consequently, the GAN model can preserve their semantics in the original resolution of CT images. While StyleGAN also demonstrates realistic synthetic images with the style feature [<xref ref-type="bibr" rid="ref29">29</xref>], we chose the PGGAN model for training because of its simple yet powerful performance. In addition, we did not consider BigGAN because it is a conditional model [<xref ref-type="bibr" rid="ref30">30</xref>]. To train the PGGAN with body CT images, the original 12-bit grayscale CT images were converted into 8-bit grayscale portable network graphics images with 3 different windowing settings: (1) a lung setting (window width 1500, window level 600), (2) a mediastinal setting (window width 450, window level 50) for chest CT images, and (3) a multiorgan setting (window width 350, window level 40) for AP-CT images. Images from each group with different windowing settings were used to train a PGGAN separately.</p>
        <p>A publicly available official implementation of PGGAN using Tensorflow in Python was used [<xref ref-type="bibr" rid="ref31">31</xref>]. While the sizes of the training images progressively grew from 4×4 to 512×512 (ie, 2<sup>n</sup>×2<sup>n</sup>, where the integer n increases from 2 to 8), the batch sizes decreased from 512 to 16, respectively. The learning rate was fixed at 0.001 while training. We carefully monitored the training process (ie, training losses and generated images) with TensorBoard and intermediated image generation to determine whether the PGGAN was properly trained. The PGGAN training was completed after the network had evaluated around 20 million body CT images. The training took ~12.5 days with 2 NVIDIA Titan RTX graphic processing units for each group with different windowing settings (ie, total training for ~37.5 days).</p>
      </sec>
      <sec>
        <title>Visual Turing Test to Assess the Realistic Nature of Synthetic CT Images</title>
        <p><xref rid="figure1" ref-type="fig">Figure 1</xref> summarizes the study design for the visual assessment performed using an image Turing test. The validation set consisted of 300 axial body CT images (150 synthetic images and 150 real images). The 150 synthetic images comprised 50 chest CT-lung window (chest-L), 50 chest CT-mediastinal window (chest-M), and 50 AP-CT images. The validation set consisted of 7 subgroups based on the anatomical structure: 50 chest-L images were divided into upper lung, middle lung, and lower lung groups; and 50 chest-M and 50 AP-CT images were divided into thorax, thoracoabdominal junction, abdomen, and pelvis groups. To avoid any selection bias, all synthetic images in the validation set were automatically generated by the PGGAN model and were not individually selected by the researchers. For the real images, 50 CT images of each anatomical subgroup (ie, chest-L, chest-M, and AP-CT) were randomly selected from 50 normal whole-body CT scans (performed at the emergency department of Asan Medical Center) by 1 co-researcher (JHY) who did not otherwise participate in the realism assessment study. A website (<ext-link ext-link-type="uri" xlink:href="http://validct.esy.es/" xlink:type="simple">validct.esy.es</ext-link>) was created to upload the validation set with 300 axial images posted and displayed in a random manner. Ten radiologists (4 radiologists with &#60;5 years of experience [Group I], 4 radiologists with 5-10 years of experience [Group II], and 2 radiologists with &#62;10 years of experience [Group III]) independently evaluated each of the 300 images slice-by-slice and decided whether each CT image was real or artificial by visual analysis with no time limit. To investigate the features of the images with obviously artificial appearance, we defined obviously artificial images as synthetic images that were identified as artificial by a majority of readers. 
Two radiologists (HYP and GSH) then visually reviewed these obviously artificial images. To determine whether the radiologists could learn to distinguish real from synthetic images, we performed an additional Turing test (postlearning visual Turing test). First, 2 board-certified radiologists (Group III) were educated in the obviously artificial findings in the synthetic images (not included in the test set). Then, 2 readers independently decided whether each of the 300 CT images were real or artificial by visual analysis. For accurate comparison of the results, the same test set as the index visual Turing test was used.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Graphical illustration of the method used to estimate the realism of the synthetic body computed tomography images. The validation set consisted of 150 synthetic and 150 real images. Synthetic images generated by the progressive growing generative adversarial network model and real images were randomly mixed and displayed on the website. Ten readers independently determined whether each image was real or artificial.</p>
          </caption>
          <graphic xlink:href="medinform_v9i3e23328_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Statistical Analyses</title>
        <p>The mean accuracy, sensitivity, and specificity of the 10 readers were calculated. The generalized estimating equations method was used to test whether the ratio of mean accuracy and random guessing was 1. The generalized estimating equations were used to compare the accuracy, sensitivity, and specificity across the reader groups with different experience levels (Group I, Group II, and Group III) and across the anatomical subgroups. To compare the diagnostic performance among subgroups, chest-L was classified into 3 image subgroups (upper, middle, and lower lung), and chest-M and AP-CT images were grouped into 4 image subgroups (thorax, thoracoabdominal junction, abdomen, and pelvis) on the basis of anatomical structures by visual inspection. The anatomical landmarks used in subgrouping of chest-L were as follows: (1) upper lung: apex to upper border of tracheal bifurcation; (2) middle lung: upper border of tracheal bifurcation to upper border of diaphragm; and (3) lower lung: upper border of diaphragm to lower border of diaphragm. The anatomical landmarks used in the subgroups of chest-M and AP-CT were as follows: (1) thorax: apex to upper border of diaphragm; (2) thoracoabdominal junction: upper border of diaphragm to lower border of diaphragm; (3) abdomen: lower border of diaphragm to upper border of iliac crest; and (4) pelvis: below the upper border of iliac crest. Chest-M and AP-CT images were combined for the subgroup classification because these images included the “soft tissue setting” used for the whole body. <xref rid="figure1" ref-type="fig">Figure 1</xref> shows the subgroup classification according to the anatomical level. The significance level was corrected for multiple comparisons using the Bonferroni correction. Interreader agreement was evaluated using Fleiss kappa. 
To identify obviously artificial images, a histogram analysis was used to display the distribution of the number of correct answers from the 10 readers (ie, identification of synthetic images as artificial) and the number of artificial images. The cut-off values (ie, percentage of readers with correct answers) were set where dramatic changes in the histogram distribution were observed. When a cut-off ≥70% was used for chest-L and ≥80% for chest-M and AP-CT images, 1 subgroup (ie, upper lung for chest-L and thoracoabdominal junction for chest-M and AP-CT images) had the highest number of readers with correct answers. In the postlearning visual Turing test, the mean accuracy, sensitivity, and specificity of the 2 readers were calculated. SPSS software (version 23, IBM Corp) and R version 3.5.3 (R Foundation for Statistical Computing) were used for the statistical analyses with the significance level set at <italic>P</italic>&#60;.05.</p>
        <p/>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Results of the Visual Turing Test</title>
        <p><xref ref-type="table" rid="table1">Table 1</xref> summarizes the results of the realism assessment of all images by the 10 readers. The mean accuracy of the 10 readers in the entire image set was higher than the random guessing (1781/3000, 59.4% vs 1500/3000, 50.0%, respectively; <italic>P</italic>&#60;.001). However, in terms of identifying synthetic images as fake, there was no significant difference in the specificity between the visual Turing test and random guessing (779/1500, 51.9% vs 750/1500, 50.0%, respectively; <italic>P</italic>=.29). There was no significant difference in the accuracy between the 3 reader groups with different experience levels (Group I, 696/1200, 58.0%; Group II, 726/1200, 60.5%; and Group III, 359/600, 59.8%; <italic>P</italic>=.36). In the detection of synthetic images, Group III showed a significantly lower specificity than Group II (<italic>P</italic>=.01) but did not show a significant difference from Group I (<italic>P</italic>=.30). <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref> summarizes the results of the subgroup analysis of the realism assessment according to the anatomical region. There were no significant differences in the accuracy between the 3 CT groups (chest-L, 595/1000, 59.5%; chest-M, 615/1000, 61.5%; and AP-CT, 571/1000, 57.1%; <italic>P</italic>=.33). In addition, there was no significant difference in the accuracy between the upper, middle, and lower lung groups of the chest-L images (upper lung, 227/370, 61.4%; middle lung, 190/290, 65.5%; and lower lung, 136/240, 56.7%, <italic>P</italic>=.36). The thoracoabdominal junction showed a significantly higher accuracy (208/280, 74.3% vs 194/370, 52.4% to 361/600, 60.2%; <italic>P</italic>=.004) and specificity (154/200, 77.0% vs 93/220, 42.3% to 149/250, 59.6%; <italic>P</italic>&#60;.001) compared with the other subgroups. 
Examples of the multilevel random generation of synthetic chest CT and AP-CT images by the PGGAN are shown in <xref rid="figure2" ref-type="fig">Figure 2</xref> and in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>, <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>, and <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Assessment of the realism of all images by the 10 readers.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="250"/>
            <col width="240"/>
            <col width="240"/>
            <col width="240"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Groups, readers (R)</td>
                <td>Accuracy (%)<sup>a</sup></td>
                <td>Sensitivity (%)<sup>b</sup></td>
                <td>Specificity (%)<sup>c</sup></td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="5">
                  <bold>Group I<sup>d</sup></bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>R01</td>
                <td>56.7</td>
                <td>67.3</td>
                <td>46.0</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>R05</td>
                <td>48.3</td>
                <td>53.3</td>
                <td>43.3</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>R09</td>
                <td>61.0</td>
                <td>70.7</td>
                <td>51.3</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>R10</td>
                <td>66.0</td>
                <td>70.7</td>
                <td>61.3</td>
              </tr>
              <tr valign="top">
                <td colspan="5">
                  <bold>Group II<sup>e</sup></bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>R02</td>
                <td>43.7</td>
                <td>50.0</td>
                <td>37.3</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>R06</td>
                <td>73.0</td>
                <td>68.7</td>
                <td>77.3</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>R07</td>
                <td>61.3</td>
                <td>65.3</td>
                <td>57.3</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>R08</td>
                <td>64.0</td>
                <td>77.3</td>
                <td>50.7</td>
              </tr>
              <tr valign="top">
                <td colspan="5">
                  <bold>Group III<sup>f</sup></bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>R03</td>
                <td>65.3</td>
                <td>86.0</td>
                <td>44.7</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>R04</td>
                <td>54.3</td>
                <td>58.7</td>
                <td>50.0</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>Mean (95% CI) accuracy: 59.4 (56.9-61.8), <italic>P</italic>=.36. <italic>P</italic> value was determined by generalized estimating equations.</p>
            </fn>
            <fn id="table1fn2">
              <p><sup>b</sup>Mean (95% CI) sensitivity: 66.8 (63.9-69.5), <italic>P</italic>=.04.</p>
            </fn>
            <fn id="table1fn3">
              <p><sup>c</sup>Mean (95% CI) specificity: 51.9 (48.4-55.5), <italic>P</italic>=.02.</p>
            </fn>
            <fn id="table1fn4">
              <p><sup>d</sup>Group I: radiologists with &#60;5 years of experience. Mean (95% CI) accuracy 58.0 (55.0-61.0), sensitivity 65.5 (61.4-69.4), and specificity 50.5 (46.3-54.7).</p>
            </fn>
            <fn id="table1fn5">
              <p><sup>e</sup>Group II: radiologists with 5-10 years of experience. Mean (95% CI) accuracy 60.5 (57.6-63.4), sensitivity 65.3 (61.4-69.0), and specificity 55.7 (51.4-59.9).</p>
            </fn>
            <fn id="table1fn6">
              <p><sup>f</sup>Group III: radiologists with &#62;10 years of experience. Mean (95% CI) accuracy 59.8 (55.5-64.1), sensitivity 72.3 (67.0-77.1), and specificity 47.3 (41.1-53.7).</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Synthetic high-resolution body computed tomography images. A. Chest computed tomography images-lung window. B. Chest computed tomography images-mediastinal window. C. Abdominopelvic computed tomography images.</p>
          </caption>
          <graphic xlink:href="medinform_v9i3e23328_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>In the postlearning visual Turing test, the mean accuracy, sensitivity, and specificity of the 2 radiologists were 67.3%, 72.7%, and 62.0%, respectively. Compared with the results of the index visual Turing test, the accuracy was increased by 7.5% and the specificity was increased by 10.1% in the postlearning visual Turing test.</p>
      </sec>
      <sec>
        <title>Interreader Agreement for Synthetic and Real Images</title>
        <p>Interreader agreement was poor for the entire image set (κ=0.11) and for the 3 CT subsets (chest-L, chest-M, and AP-CT; κ=0.04-0.13). Interreader agreement was higher for the thoracoabdominal junction subset than for the other anatomical regions (κ=0.31 vs 0.03-0.14) (<xref ref-type="table" rid="table2">Table 2</xref>).</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Interreader agreement of the 10 readers with respect to the imaging subgroups.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="350"/>
            <col width="0"/>
            <col width="320"/>
            <col width="0"/>
            <col width="300"/>
            <thead>
              <tr valign="top">
                <td colspan="3">Image type, subsets</td>
                <td colspan="2">Kappa values</td>
                <td>95% CI</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="3">Entire image set</td>
                <td colspan="2">0.11</td>
                <td>0.09 to 0.13</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>Image subsets</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Chest-L<sup>a</sup></td>
                <td colspan="2">0.04</td>
                <td colspan="2">0.01 to 0.07</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Chest-M<sup>b</sup></td>
                <td colspan="2">0.13</td>
                <td colspan="2">0.10 to 0.15</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>AP-CT<sup>c</sup></td>
                <td colspan="2">0.11</td>
                <td colspan="2">0.08 to 0.14</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>Chest-L</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Upper lung</td>
                <td colspan="2">0.04</td>
                <td colspan="2">–0.01 to 0.09</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Middle lung</td>
                <td colspan="2">0.01</td>
                <td colspan="2">–0.04 to 0.07</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Lower lung</td>
                <td colspan="2">0.06</td>
                <td colspan="2">0.00 to 0.12</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>Chest-M and AP-CT</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Thorax</td>
                <td colspan="2">0.03</td>
                <td colspan="2">–0.01 to 0.06</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Thoracoabdominal junction</td>
                <td colspan="2">0.31</td>
                <td colspan="2">0.25 to 0.36</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Abdomen</td>
                <td colspan="2">0.14</td>
                <td colspan="2">0.10 to 0.18</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Pelvis</td>
                <td colspan="2">0.03</td>
                <td colspan="2">–0.02 to 0.08</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table2fn1">
              <p><sup>a</sup>Chest-L: chest computed tomography images-lung window.</p>
            </fn>
            <fn id="table2fn2">
              <p><sup>b</sup>Chest-M: chest computed tomography images-mediastinal window.</p>
            </fn>
            <fn id="table2fn3">
              <p><sup>c</sup>AP-CT: abdominopelvic computed tomography images.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Analysis of the Features of Obviously Artificial Images</title>
        <p><xref rid="figure3" ref-type="fig">Figure 3</xref> shows that the majority of readers characterized the synthetic images as artificial predominantly at the thoracoabdominal junction of the chest-M and AP-CT, followed by the upper lung of the chest-L. Using a histogram analysis, 24 of the 150 synthetic images (22 images of the chest-M and AP-CT groups and 2 images of the upper lung) were selected and reviewed by 2 radiologists to identify the features indicating that the images were artificial. <xref ref-type="table" rid="table3">Table 3</xref> details the artificial features indicative of synthetic CT images. A total of 34 artificial features were found in the 24 synthetic images, the most common being vascular structures (24/34, 71%), followed by movable organs (ie, stomach, heart, small bowel, and mediastinal fat around the heart, 8/34, 24%). Among the vascular structures, intrahepatic vessels (ie, portal and hepatic veins) most frequently had abnormal configurations, directions, or diameters (<xref rid="figure4" ref-type="fig">Figure 4</xref>). In case of the movable organs, an abnormal organ contour was the main feature indicative of an artificially generated image (<xref rid="figure4" ref-type="fig">Figure 4</xref>C and <xref rid="figure4" ref-type="fig">Figure 4</xref>D).</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Histogram analysis of the correct answers for the 150 synthetic images (accurate identification of the artificial images) by the 10 readers. A. When a cut-off for the percentage of readers with correct answers was set at ≥70% for the chest computed tomography-lung window group, only 1 subgroup (upper lung) remained (§). B. When a cut-off level for the percentage of readers with correct answers was set at ≥80% for the chest computed tomography-mediastinal window and abdominopelvic computed tomography groups, the thoracoabdominal (TA) junction group (*) showed dominance over the other subgroups.</p>
          </caption>
          <graphic xlink:href="medinform_v9i3e23328_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <table-wrap position="float" id="table3">
          <label>Table 3</label>
          <caption>
            <p>Details of the obviously artificial body computed tomography images.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="770"/>
            <col width="0"/>
            <col width="200"/>
            <thead>
              <tr valign="top">
                <td colspan="3">Configuration, artificial features</td>
                <td>Images (n)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="4">
                  <bold>Abnormal vascular configuration<sup>a</sup></bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Hepatic vessel (portal vein and hepatic vein)</td>
                <td colspan="2">13</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Gastric vessel</td>
                <td colspan="2">3</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Mesenteric vessel</td>
                <td colspan="2">2</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Pulmonary vessel</td>
                <td colspan="2">2</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Others (peripancreatic, coronary, rectal, axillary vessel)</td>
                <td colspan="2">4</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Abnormal contour or structure<sup>b</sup></bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Stomach</td>
                <td colspan="2">3</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Pancreas</td>
                <td colspan="2">2</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Heart</td>
                <td colspan="2">2</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Mediastinal fat around the heart</td>
                <td colspan="2">2</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Small bowel</td>
                <td colspan="2">1</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table3fn1">
              <p><sup>a</sup>Ill-defined vascular margin, bizarre vascular course, or abnormal vascular diameter.</p>
            </fn>
            <fn id="table3fn2">
              <p><sup>b</sup>Blurred margin of the organ, or bizarre structure of the soft tissue.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>Obviously artificial body computed tomography images. A. Ill-defined margins and abnormal courses of intrahepatic vessels (arrows) in the liver. Note curvilinear structures (dotted rectangle) at the liver and stomach. B. Accentuated vascular markings in both upper lung apices (arrows). C. Abnormal infiltration in the pericardial fat (arrows). D. Irregular contours of the stomach body and antrum with blurred margins (arrows).</p>
          </caption>
          <graphic xlink:href="medinform_v9i3e23328_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>We showed that the GAN-based synthetic whole-body CT images have comparable image fidelity to real images. For this, our study validated the synthetic images by multiple radiology experts because the visual Turing test could be greatly influenced by the reader’s level of expertise [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref33">33</xref>]. There was no significant difference in the accuracy between the reader groups. In addition, the interreader agreement was poor for the distinction between real and synthetic images. These results imply that a validation test was properly performed with mitigation of the impact of the reader’s level of expertise. However, there was quite a significant disparity between sensitivity (66.8%) and specificity (51.9%). We presume that this is mainly due to factors affecting the reader performance test. First, all readers had at least some exposure to real body CT images in clinical practice. In addition, the real images in the validation data set consisted of relatively uniform CT images because they were acquired using a similar CT machine with similar acquisition parameters. These factors affect the readers’ confidence and decisions to identify real images, resulting in high sensitivity. This is supported by the fact that the sensitivity reported here reached 72.3% in Group III (radiologists with long-term exposure to real CT images in our institution). In contrast, some obviously artificial features (eg, the ill-defined margin of the heart) in synthetic images are similar to the motion artifacts or noises in real images. This can cause reader confusion, resulting in lower specificity. In addition, the mean accuracy (59.4%) was higher than random guessing (50%); however, it is believed that the high sensitivity contributed significantly to this result. 
Therefore, in terms of identifying synthetic images as fake, the readers’ performance was not much better than random guessing. For robust validation, using real CT images from other medical institutions (not experienced by the readers) in the validation set could be needed. Despite this limitation, our data suggest that the synthetic images are highly realistic and indistinguishable from real CT images.</p>
        <p>One critical finding of this study was that the discrepancies between real and synthetic CT images occur mainly in the thoracoabdominal junction and in anatomical details. The thoracoabdominal junction is the most prone to motion artifacts due to respiratory movement. In addition, it has a complex anatomical structure due to multiple organs in small spaces [<xref ref-type="bibr" rid="ref34">34</xref>]. These features of the thoracoabdominal junction might have contributed to the identification of unrealistic synthetic body images. This phenomenon in the areas with complex structures has been shown in other image syntheses using GANs [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>]. It is worth noting that this study showed that GAN achieved highly realistic images for gross anatomy and not for detailed anatomical structures. The most common obviously artificial features in synthetic images were bizarre configurations and directions of small-to-medium vessels. This is probably due to the lack of the interslice shape continuity caused by training on 2D CT images and the anatomical diversity of these vessels [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref35">35</xref>]. Therefore, to overcome these limitations, further work would require the generation of 3D CT images with larger and more diverse data sets. The second most obviously artificial feature was an abnormal contour of the movable organs. This could be another limitation in the GAN-based realistic image synthesis. Recently, more powerful GAN models have been introduced into the medical field. We believe that many problems raised here can serve as criteria to test the performance of the newly introduced GAN models.</p>
        <p>As expected, learning artificial features in the synthetic images improved the performance of radiologists in identifying artificial images. However, it did not reach our expectations. This is because artificial features occurred mainly in some images of certain anatomical subgroups. In addition, as mentioned before, it is not easy for radiologists to distinguish these artificial features from motion artifacts or noise in real images. Furthermore, our visual Turing tests were based on reviewing 2D synthetic CT slices. However, although 3D data (eg, CT) are presented as 2D images, human perception of an anomaly is based on the imagination of space from 2D images. These factors could make it difficult to determine whether each CT image is real or artificial.</p>
      </sec>
      <sec>
        <title>Comparison With Prior Work</title>
        <p>Bermudez et al [<xref ref-type="bibr" rid="ref36">36</xref>] reported that GAN can successfully generate realistic brain MR images. However, unlike this study, the previous GAN-based unconditional synthesis of advanced radiological images (CT or magnetic resonance images) has been confined to some specific pathologic lesions (eg, lung and liver lesions) and specific organs (eg, heart and brain) for a variety of purposes [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref36">36</xref>-<xref ref-type="bibr" rid="ref40">40</xref>]. In contrast, this study shows that realistic high-resolution (512×512 pixels) whole-body CT images can be synthesized by a GAN. The GAN was trained with whole-body CT images (512×512 pixels) in this study; therefore, the model learned the semantics of the images. It is worth noting that the generated images cover a wide range of 2-dimensional (2D) slice CT images along the z-axis from the thorax to the pelvis and contain multiple organs. To the best of our knowledge, there has been no study that has investigated and validated the unsupervised synthesis of highly realistic body CT images by using a PGGAN.</p>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>Our study had some limitations. First, technical novelty is lacking in this study. However, while state-of-the-art GAN models such as PGGAN and StyleGAN were introduced recently, there are still limited studies in the medical domain and a lack of published studies on anomaly detection tasks. As far as we know, this is the first attempt to generate high-quality medical images (whole-body CT) and to validate the generated medical images by expert radiologists. This study will provide readers with a way to follow our approach and to achieve advances in anomaly detection tasks in medical imaging. Second, our training data are not sufficient to cover the probability distribution of normal data. This preliminary study used normal CT images from our institution. The training data consisted of relatively homogeneous CT images with similar acquisition parameters and CT machines. Therefore, further studies should focus on the collection of multi-center and multi-country diverse CT data to achieve better results. Third, due to limited graphics processing unit memory, our study only validated the realistic nature of separate 2D high-resolution body CT slices that were randomly generated by the GAN. This study did not handle 3D synthetic CT images, although real body CT images are volumetric data. Therefore, interslice continuity of pathologic lesions and organs may be a crucial factor for improving the performance of deep learning–based models. Further studies are needed to generate and validate 2.5D or 3D synthetic CT images in terms of detailed anatomical structures. Fourth, the number of synthetic images in the validation set varied between each anatomical region; thus, the statistical power may have been insufficient. However, we tried to avoid any researcher-associated selection bias in this process. Finally, we did not evaluate the correlation between the number of CT images in the training set and the generation of realistic images in the validation set. 
Our study showed that the PGGAN can successfully produce realistic body CT images by using a much smaller amount of training data in contrast to previous studies on the generation of celebrity face images with 1K pixels by 1K pixels [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]. However, we did not provide a cut-off value for the number of CT images required to generate realistic images. Therefore, further studies are needed to clarify the approximate data set size required for the generation of highly realistic normal or disease-state CT images.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>GAN can synthesize highly realistic high-resolution body CT images indistinguishable from real images; however, it has limitations in generating body images in the thoracoabdominal junction and lacks accuracy in anatomical details.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Example video of the multi-level random generation of synthetic chest computed tomography-lung window by the progressive growing generative adversarial network.</p>
        <media xlink:href="medinform_v9i3e23328_app1.mp4" xlink:title="MP4 File  (MP4 Video), 34971 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Example video of the multi-level random generation of synthetic chest computed tomography-mediastinal window by the progressive growing generative adversarial network.</p>
        <media xlink:href="medinform_v9i3e23328_app2.mp4" xlink:title="MP4 File  (MP4 Video), 35117 KB"/>
      </supplementary-material>
      <supplementary-material id="app3">
        <label>Multimedia Appendix 3</label>
        <p>Example video of the multi-level random generation of synthetic abdominopelvic computed tomography images by the progressive growing generative adversarial network.</p>
        <media xlink:href="medinform_v9i3e23328_app3.mp4" xlink:title="MP4 File  (MP4 Video), 34476 KB"/>
      </supplementary-material>
      <supplementary-material id="app4">
        <label>Multimedia Appendix 4</label>
        <p>Subgroup analysis of diagnostic performance with respect to the anatomical subgroups. A. Accuracy, B. Sensitivity, C. Specificity. There was a significant difference in accuracy (*) and specificity (†) between the thoracoabdominal junction (TA) and other image subgroups. Chest-L: chest computed tomography-lung window; chest-M: chest computed tomography-mediastinal window; AP-CT: abdominopelvic computed tomography.</p>
        <media xlink:href="medinform_v9i3e23328_app4.png" xlink:title="PNG File , 152 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AP-CT</term>
          <def>
            <p>abdominopelvic computed tomography</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">Chest-L</term>
          <def>
            <p>chest computed tomography-lung window</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">Chest-M</term>
          <def>
            <p>chest computed tomography-mediastinal window</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">CT</term>
          <def>
            <p>computed tomography</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">GAN</term>
          <def>
            <p>generative adversarial network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">PGGAN</term>
          <def>
            <p>progressive growing generative adversarial network</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>The authors are grateful to Ju Hee Lee, MD, Hyun Jung Koo, MD, Jung Hee Son, MD, Ji Hun Kang, MD, Jooae Choe, MD, Mi Yeon Park, MD, Se Jin Choi, MD, and Yura Ahn, MD, for participating as readers. This work was supported by the National Research Foundation of Korea (NRF-2018R1C1B6006371 to GS Hong). The data sets are not publicly available due to restrictions in the data-sharing agreements with the data sources. Ethical approval for the use of the deidentified slides in this study was granted by the Institutional Review Board of the Asan Medical Center.</p>
    </ack>
    <fn-group>
      <fn fn-type="con">
        <p>HYP wrote the original draft, analyzed the data, and performed formal analysis. HJB wrote the original draft and provided technical guidance for the project. GSH and NKK conceptualized the project and provided the methodology for the project. All authors reviewed the final manuscript.</p>
      </fn>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sorin</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Barash</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Konen</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Klang</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Creating Artificial Images for Radiology Applications Using Generative Adversarial Networks (GANs) - A Systematic Review</article-title>
          <source>Acad Radiol</source>
          <year>2020</year>
          <month>08</month>
          <volume>27</volume>
          <issue>8</issue>
          <fpage>1175</fpage>
          <lpage>1185</lpage>
          <pub-id pub-id-type="doi">10.1016/j.acra.2019.12.024</pub-id>
          <pub-id pub-id-type="medline">32035758</pub-id>
          <pub-id pub-id-type="pii">S1076-6332(20)30021-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chartrand</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>PM</given-names>
            </name>
            <name name-style="western">
              <surname>Vorontsov</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Drozdzal</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Turcotte</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Pal</surname>
              <given-names>CJ</given-names>
            </name>
            <name name-style="western">
              <surname>Kadoury</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Deep Learning: A Primer for Radiologists</article-title>
          <source>Radiographics</source>
          <year>2017</year>
          <volume>37</volume>
          <issue>7</issue>
          <fpage>2113</fpage>
          <lpage>2131</lpage>
          <pub-id pub-id-type="doi">10.1148/rg.2017170077</pub-id>
          <pub-id pub-id-type="medline">29131760</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ker</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Rao</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Deep Learning Applications in Medical Image Analysis</article-title>
          <source>IEEE Access</source>
          <year>2018</year>
          <volume>6</volume>
          <fpage>9375</fpage>
          <lpage>9389</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://paperpile.com/b/JmyQqQ/YVRQr"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/ACCESS.2017.2788044</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jun</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Cho</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>GB</given-names>
            </name>
            <name name-style="western">
              <surname>Seo</surname>
              <given-names>JB</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Deep Learning in Medical Imaging: General Overview</article-title>
          <source>Korean J Radiol</source>
          <year>2017</year>
          <volume>18</volume>
          <issue>4</issue>
          <fpage>570</fpage>
          <lpage>584</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.kjronline.org/DOIx.php?id=10.3348/kjr.2017.18.4.570"/>
          </comment>
          <pub-id pub-id-type="doi">10.3348/kjr.2017.18.4.570</pub-id>
          <pub-id pub-id-type="medline">28670152</pub-id>
          <pub-id pub-id-type="pmcid">PMC5447633</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Miotto</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Dudley</surname>
              <given-names>JT</given-names>
            </name>
          </person-group>
          <article-title>Deep learning for healthcare: review, opportunities and challenges</article-title>
          <source>Brief Bioinform</source>
          <year>2018</year>
          <month>11</month>
          <day>27</day>
          <volume>19</volume>
          <issue>6</issue>
          <fpage>1236</fpage>
          <lpage>1246</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/28481991"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/bib/bbx044</pub-id>
          <pub-id pub-id-type="medline">28481991</pub-id>
          <pub-id pub-id-type="pii">3800524</pub-id>
          <pub-id pub-id-type="pmcid">PMC6455466</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kazeminia</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Baur</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Kuijper</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>van Ginneken</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Navab</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Albarqouni</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mukhopadhyay</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>GANs for medical image analysis</article-title>
          <source>Artificial Intelligence in Medicine</source>
          <year>2020</year>
          <month>09</month>
          <volume>109</volume>
          <fpage>101938</fpage>
          <pub-id pub-id-type="doi">10.1016/j.artmed.2020.101938</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yi</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Walia</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Babyn</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Generative adversarial network in medical imaging: A review</article-title>
          <source>Med Image Anal</source>
          <year>2019</year>
          <month>12</month>
          <volume>58</volume>
          <fpage>101552</fpage>
          <pub-id pub-id-type="doi">10.1016/j.media.2019.101552</pub-id>
          <pub-id pub-id-type="medline">31521965</pub-id>
          <pub-id pub-id-type="pii">S1361-8415(18)30843-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Frid-Adar</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Diamant</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Klang</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Amitai</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Goldberger</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Greenspan</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>GAN-based synthetic medical image augmentation for increased CNN performance in liver lesion classification</article-title>
          <source>Neurocomputing</source>
          <year>2018</year>
          <month>12</month>
          <volume>321</volume>
          <fpage>321</fpage>
          <lpage>331</lpage>
          <pub-id pub-id-type="doi">10.1016/j.neucom.2018.09.013</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gadermayr</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Müller</surname>
              <given-names>Madlaine</given-names>
            </name>
            <name name-style="western">
              <surname>Truhn</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Krämer</surname>
              <given-names>Nils</given-names>
            </name>
            <name name-style="western">
              <surname>Merhof</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Gess</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Domain-specific data augmentation for segmenting MR images of fatty infiltrated human thighs with neural networks</article-title>
          <source>J Magn Reson Imaging</source>
          <year>2019</year>
          <month>06</month>
          <volume>49</volume>
          <issue>6</issue>
          <fpage>1676</fpage>
          <lpage>1683</lpage>
          <pub-id pub-id-type="doi">10.1002/jmri.26544</pub-id>
          <pub-id pub-id-type="medline">30623506</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kazuhiro</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Werner</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Toriumi</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Javadi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Pomper</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Solnes</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Verde</surname>
              <given-names>Franco</given-names>
            </name>
            <name name-style="western">
              <surname>Higuchi</surname>
              <given-names>Takahiro</given-names>
            </name>
            <name name-style="western">
              <surname>Rowe</surname>
              <given-names>Steven P</given-names>
            </name>
          </person-group>
          <article-title>Generative Adversarial Networks for the Creation of Realistic Artificial Brain Magnetic Resonance Images</article-title>
          <source>Tomography</source>
          <year>2018</year>
          <month>12</month>
          <volume>4</volume>
          <issue>4</issue>
          <fpage>159</fpage>
          <lpage>163</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=TOMO-2018-00042"/>
          </comment>
          <pub-id pub-id-type="doi">10.18383/j.tom.2018.00042</pub-id>
          <pub-id pub-id-type="medline">30588501</pub-id>
          <pub-id pub-id-type="pii">TOMO-2018-00042</pub-id>
          <pub-id pub-id-type="pmcid">PMC6299742</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Russ</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Goerttler</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Schnurr</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bauer</surname>
              <given-names>DF</given-names>
            </name>
            <name name-style="western">
              <surname>Hatamikia</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Schad</surname>
              <given-names>LR</given-names>
            </name>
            <name name-style="western">
              <surname>Zöllner</surname>
              <given-names>Frank G</given-names>
            </name>
            <name name-style="western">
              <surname>Chung</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Synthesis of CT images from digital body phantoms using CycleGAN</article-title>
          <source>Int J Comput Assist Radiol Surg</source>
          <year>2019</year>
          <month>10</month>
          <volume>14</volume>
          <issue>10</issue>
          <fpage>1741</fpage>
          <lpage>1750</lpage>
          <pub-id pub-id-type="doi">10.1007/s11548-019-02042-9</pub-id>
          <pub-id pub-id-type="medline">31378841</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11548-019-02042-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ben-Cohen</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Klang</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Raskin</surname>
              <given-names>SP</given-names>
            </name>
            <name name-style="western">
              <surname>Soffer</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ben-Haim</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Konen</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Amitai</surname>
              <given-names>MM</given-names>
            </name>
            <name name-style="western">
              <surname>Greenspan</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Cross-modality synthesis from CT to PET using FCN and GAN networks for improved automated lesion detection</article-title>
          <source>Engineering Applications of Artificial Intelligence</source>
          <year>2019</year>
          <month>02</month>
          <volume>78</volume>
          <fpage>186</fpage>
          <lpage>194</lpage>
          <pub-id pub-id-type="doi">10.1016/j.engappai.2018.11.013</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dar</surname>
              <given-names>SU</given-names>
            </name>
            <name name-style="western">
              <surname>Yurt</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Karacan</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Erdem</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Erdem</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Cukur</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Image Synthesis in Multi-Contrast MRI With Conditional Generative Adversarial Networks</article-title>
          <source>IEEE Trans Med Imaging</source>
          <year>2019</year>
          <month>10</month>
          <volume>38</volume>
          <issue>10</issue>
          <fpage>2375</fpage>
          <lpage>2388</lpage>
          <pub-id pub-id-type="doi">10.1109/TMI.2019.2901750</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Tyagi</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Rimner</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Deasy</surname>
              <given-names>JO</given-names>
            </name>
            <name name-style="western">
              <surname>Veeraraghavan</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Cross-modality (CT-MRI) prior augmented deep learning for robust lung tumor segmentation from small MR datasets</article-title>
          <source>Med Phys</source>
          <year>2019</year>
          <month>10</month>
          <volume>46</volume>
          <issue>10</issue>
          <fpage>4392</fpage>
          <lpage>4404</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/31274206"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/mp.13695</pub-id>
          <pub-id pub-id-type="medline">31274206</pub-id>
          <pub-id pub-id-type="pmcid">PMC6800584</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lei</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Harms</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Shu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Jani</surname>
              <given-names>AB</given-names>
            </name>
            <name name-style="western">
              <surname>Curran</surname>
              <given-names>WJ</given-names>
            </name>
            <name name-style="western">
              <surname>Mao</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>MRI-only based synthetic CT generation using dense cycle consistent generative adversarial networks</article-title>
          <source>Med Phys</source>
          <year>2019</year>
          <month>08</month>
          <volume>46</volume>
          <issue>8</issue>
          <fpage>3565</fpage>
          <lpage>3581</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/31112304"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/mp.13617</pub-id>
          <pub-id pub-id-type="medline">31112304</pub-id>
          <pub-id pub-id-type="pmcid">PMC6692192</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vitale</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Orlando</surname>
              <given-names>JI</given-names>
            </name>
            <name name-style="western">
              <surname>Iarussi</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Larrabide</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Improving realism in patient-specific abdominal ultrasound simulation using CycleGANs</article-title>
          <source>Int J Comput Assist Radiol Surg</source>
          <year>2020</year>
          <month>02</month>
          <volume>15</volume>
          <issue>2</issue>
          <fpage>183</fpage>
          <lpage>192</lpage>
          <pub-id pub-id-type="doi">10.1007/s11548-019-02046-5</pub-id>
          <pub-id pub-id-type="medline">31392671</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11548-019-02046-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kang</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Koo</surname>
              <given-names>HJ</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>DH</given-names>
            </name>
            <name name-style="western">
              <surname>Seo</surname>
              <given-names>JB</given-names>
            </name>
            <name name-style="western">
              <surname>Ye</surname>
              <given-names>JC</given-names>
            </name>
          </person-group>
          <article-title>Cycle-consistent adversarial denoising network for multiphase coronary CT angiography</article-title>
          <source>Med Phys</source>
          <year>2019</year>
          <month>02</month>
          <volume>46</volume>
          <issue>2</issue>
          <fpage>550</fpage>
          <lpage>562</lpage>
          <pub-id pub-id-type="doi">10.1002/mp.13284</pub-id>
          <pub-id pub-id-type="medline">30449055</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>KH</given-names>
            </name>
            <name name-style="western">
              <surname>Do</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Improving resolution of MR images with an adversarial network incorporating images with different contrast</article-title>
          <source>Med Phys</source>
          <year>2018</year>
          <month>07</month>
          <volume>45</volume>
          <issue>7</issue>
          <fpage>3120</fpage>
          <lpage>3131</lpage>
          <pub-id pub-id-type="doi">10.1002/mp.12945</pub-id>
          <pub-id pub-id-type="medline">29729006</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Gu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Generating synthesized computed tomography (CT) from cone-beam computed tomography (CBCT) using CycleGAN for adaptive radiation therapy</article-title>
          <source>Phys Med Biol</source>
          <year>2019</year>
          <month>06</month>
          <day>10</day>
          <volume>64</volume>
          <issue>12</issue>
          <fpage>125002</fpage>
          <pub-id pub-id-type="doi">10.1088/1361-6560/ab22f9</pub-id>
          <pub-id pub-id-type="medline">31108465</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>You</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Cong</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Shan</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Gjesteby</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Ju</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Structurally-Sensitive Multi-Scale Deep Neural Network for Low-Dose CT Denoising</article-title>
          <source>IEEE Access</source>
          <year>2018</year>
          <volume>6</volume>
          <fpage>41839</fpage>
          <lpage>41855</lpage>
          <pub-id pub-id-type="doi">10.1109/ACCESS.2018.2858196</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Lei</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Thomas</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Curran</surname>
              <given-names>WJ</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Automatic multiorgan segmentation in thorax CT images using U-net-GAN</article-title>
          <source>Med Phys</source>
          <year>2019</year>
          <month>05</month>
          <volume>46</volume>
          <issue>5</issue>
          <fpage>2157</fpage>
          <lpage>2168</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/30810231"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/mp.13458</pub-id>
          <pub-id pub-id-type="medline">30810231</pub-id>
          <pub-id pub-id-type="pmcid">PMC6510589</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Guo</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Mu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Guo</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Accurate colorectal tumor segmentation for CT scans based on the label assignment generative adversarial network</article-title>
          <source>Med Phys</source>
          <year>2019</year>
          <month>08</month>
          <volume>46</volume>
          <issue>8</issue>
          <fpage>3532</fpage>
          <lpage>3542</lpage>
          <pub-id pub-id-type="doi">10.1002/mp.13584</pub-id>
          <pub-id pub-id-type="medline">31087327</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Seah</surname>
              <given-names>JCY</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>JSN</given-names>
            </name>
            <name name-style="western">
              <surname>Kitchen</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gaillard</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Dixon</surname>
              <given-names>AF</given-names>
            </name>
          </person-group>
          <article-title>Chest Radiographs in Congestive Heart Failure: Visualizing Neural Network Learning</article-title>
          <source>Radiology</source>
          <year>2019</year>
          <month>02</month>
          <volume>290</volume>
          <issue>2</issue>
          <fpage>514</fpage>
          <lpage>522</lpage>
          <pub-id pub-id-type="doi">10.1148/radiol.2018180887</pub-id>
          <pub-id pub-id-type="medline">30398431</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xue</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Long</surname>
              <given-names>LR</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>SegAN: Adversarial Network with Multi-scale L1 Loss for Medical Image Segmentation</article-title>
          <source>Neuroinformatics</source>
          <year>2018</year>
          <month>10</month>
          <volume>16</volume>
          <issue>3-4</issue>
          <fpage>383</fpage>
          <lpage>392</lpage>
          <pub-id pub-id-type="doi">10.1007/s12021-018-9377-x</pub-id>
          <pub-id pub-id-type="medline">29725916</pub-id>
          <pub-id pub-id-type="pii">10.1007/s12021-018-9377-x</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schlegl</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Seeböck</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Waldstein</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Schmidt-Erfurth</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Langs</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Unsupervised anomaly detection with generative adversarial networks to guide marker discovery</article-title>
          <year>2017</year>
          <conf-name>International conference on information processing in medical imaging</conf-name>
          <conf-date>25-30 June 2017</conf-date>
          <conf-loc>Boone, United States</conf-loc>
          <pub-id pub-id-type="doi">10.1007/978-3-319-59050-9_12</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Alex</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Chennamsetty</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Krishnamurthi</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Generative adversarial networks for brain lesion detection</article-title>
          <year>2017</year>
          <conf-name>The international society for optics and photonics</conf-name>
          <conf-date>26 February-2 March 2017</conf-date>
          <conf-loc>San Jose, California, United States</conf-loc>
          <pub-id pub-id-type="doi">10.1117/12.2254487</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Radford</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Metz</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Chintala</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Unsupervised representation learning with deep convolutional generative adversarial networks</article-title>
          <source>arXiv</source>
          <year>2015</year>
          <access-date>2021-01-02</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1511.06434">https://arxiv.org/abs/1511.06434</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Karras</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Aila</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Laine</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lehtinen</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Progressive growing of GANs for improved quality, stability, and variation</article-title>
          <source>arXiv</source>
          <year>2017</year>
          <access-date>2021-01-02</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1710.10196">https://arxiv.org/abs/1710.10196</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Karras</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Laine</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Aila</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>A Style-Based Generator Architecture for Generative Adversarial Networks</article-title>
          <source>IEEE Trans Pattern Anal Mach Intell</source>
          <year>2020</year>
          <month>02</month>
          <day>02</day>
          <volume>PP</volume>
          <pub-id pub-id-type="doi">10.1109/TPAMI.2020.2970919</pub-id>
          <pub-id pub-id-type="medline">32012000</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Brock</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Donahue</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Simonyan</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Large scale GAN training for high fidelity natural image synthesis</article-title>
          <source>arXiv</source>
          <year>2019</year>
          <access-date>2021-01-02</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1809.11096">https://arxiv.org/abs/1809.11096</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mirsky</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Mahler</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Shelef</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Elovici</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>CT-GAN: malicious tampering of 3D medical imagery using deep learning</article-title>
          <source>arXiv</source>
          <year>2019</year>
          <access-date>2021-01-02</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1901.03597">https://arxiv.org/abs/1901.03597</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Monnier-Cholley</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Carrat</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Cholley</surname>
              <given-names>BP</given-names>
            </name>
            <name name-style="western">
              <surname>Tubiana</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Arrivé</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Detection of lung cancer on radiographs: receiver operating characteristic analyses of radiologists', pulmonologists', and anesthesiologists' performance</article-title>
          <source>Radiology</source>
          <year>2004</year>
          <month>12</month>
          <volume>233</volume>
          <issue>3</issue>
          <fpage>799</fpage>
          <lpage>805</lpage>
          <pub-id pub-id-type="doi">10.1148/radiol.2333031478</pub-id>
          <pub-id pub-id-type="medline">15486213</pub-id>
          <pub-id pub-id-type="pii">2333031478</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Quekel</surname>
              <given-names>LG</given-names>
            </name>
            <name name-style="western">
              <surname>Kessels</surname>
              <given-names>AG</given-names>
            </name>
            <name name-style="western">
              <surname>Goei</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>van Engelshoven</surname>
              <given-names>JM</given-names>
            </name>
          </person-group>
          <article-title>Detection of lung cancer on the chest radiograph: a study on observer performance</article-title>
          <source>Eur J Radiol</source>
          <year>2001</year>
          <month>08</month>
          <volume>39</volume>
          <issue>2</issue>
          <fpage>111</fpage>
          <lpage>116</lpage>
          <pub-id pub-id-type="doi">10.1016/s0720-048x(01)00301-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Killoran</surname>
              <given-names>JH</given-names>
            </name>
            <name name-style="western">
              <surname>Gerbaudo</surname>
              <given-names>VH</given-names>
            </name>
            <name name-style="western">
              <surname>Mamede</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ionascu</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Berbeco</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Motion artifacts occurring at the lung/diaphragm interface using 4D CT attenuation correction of 4D PET scans</article-title>
          <source>J Appl Clin Med Phys</source>
          <year>2011</year>
          <month>11</month>
          <day>15</day>
          <volume>12</volume>
          <issue>4</issue>
          <fpage>3502</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/22089005"/>
          </comment>
          <pub-id pub-id-type="doi">10.1120/jacmp.v12i4.3502</pub-id>
          <pub-id pub-id-type="medline">22089005</pub-id>
          <pub-id pub-id-type="pmcid">PMC5718739</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cai</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Xing</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Pancreas segmentation in CT and MRI images via domain specific network designing and recurrent neural contextual learning</article-title>
          <source>arXiv</source>
          <year>2018</year>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bermudez</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Plassard</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Davis</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Newton</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Resnick</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Landman</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Learning Implicit Brain MRI Manifolds with Deep Learning</article-title>
          <source>Proc SPIE Int Soc Opt Eng</source>
          <year>2018</year>
          <month>03</month>
          <volume>10574</volume>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/29887659"/>
          </comment>
          <pub-id pub-id-type="doi">10.1117/12.2293515</pub-id>
          <pub-id pub-id-type="medline">29887659</pub-id>
          <pub-id pub-id-type="pmcid">PMC5990281</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bowles</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Guerrero</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Bentley</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Gunn</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Hammers</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>GAN augmentation: Augmenting training data using generative adversarial networks</article-title>
          <source>arXiv</source>
          <year>2018</year>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bowles</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Gunn</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Hammers</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rueckert</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Modelling the progression of Alzheimer's disease in MRI using generative adversarial networks</article-title>
          <year>2018</year>
          <conf-name>The international society for optics and photonics</conf-name>
          <conf-date>25 February-1 March 2018</conf-date>
          <conf-loc>San Jose, California, United States</conf-loc>
          <publisher-loc>Medical Imaging 2018</publisher-loc>
          <publisher-name>Image Processing</publisher-name>
          <pub-id pub-id-type="doi">10.1117/12.2293256</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chuquicusma</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Hussein</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Burt</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bagci</surname>
              <given-names>U</given-names>
            </name>
          </person-group>
          <article-title>How to fool radiologists with generative adversarial networks? a visual turing test for lung cancer diagnosis</article-title>
          <year>2018</year>
          <conf-name>2018 IEEE 15th international symposium on biomedical imaging</conf-name>
          <conf-date>4-7 April 2018</conf-date>
          <conf-loc>Washington, D.C., United States</conf-loc>
          <pub-id pub-id-type="doi">10.1109/isbi.2018.8363564</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Gooya</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Frangi</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Semi-supervised assessment of incomplete LV coverage in cardiac MRI using generative adversarial nets</article-title>
          <year>2017</year>
          <conf-name>2017 International Workshop on Simulation and Synthesis in Medical Imaging</conf-name>
          <conf-date>10 September 2017</conf-date>
          <conf-loc>Québec City, Canada</conf-loc>
          <pub-id pub-id-type="doi">10.1007/978-3-319-68127-6_7</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
