<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMI</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Med Inform</journal-id>
      <journal-title>JMIR Medical Informatics</journal-title>
      <issn pub-type="epub">2291-9694</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v10i10e38640</article-id>
      <article-id pub-id-type="pmid">36315222</article-id>
      <article-id pub-id-type="doi">10.2196/38640</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Tooth-Related Disease Detection System Based on Panoramic Images and Optimization Through Automation: Development Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Lovis</surname>
            <given-names>Christian</given-names>
          </name>
        </contrib>
        <contrib contrib-type="editor">
          <name>
            <surname>Hefner</surname>
            <given-names>Jennifer</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Lim</surname>
            <given-names>Gilbert</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Li</surname>
            <given-names>Zhongqiang</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Kim</surname>
            <given-names>Changgyun</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-8392-9055</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Jeong</surname>
            <given-names>Hogul</given-names>
          </name>
          <degrees>DDS, PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>AI Cloud R&#38;D Center, InVisionLab Inc</institution>
            <addr-line>G114, 128, Beobwon-ro, Songpa-gu</addr-line>
            <addr-line>Seoul, 05854</addr-line>
            <country>Republic of Korea</country>
            <phone>82 70 4415 2229</phone>
            <fax>82 50 4439 7765</fax>
            <email>rari98@naver.com</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-3322-8453</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Park</surname>
            <given-names>Wonse</given-names>
          </name>
          <degrees>DDS, PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2081-1156</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Kim</surname>
            <given-names>Donghyun</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-0841-3830</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>AI Cloud R&#38;D Center, InVisionLab Inc</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Advanced General Dentistry, College of Dentistry, Yonsei University &#38; Institute for Innovation in Digital Healthcare</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Hogul Jeong <email>rari98@naver.com</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>10</month>
        <year>2022</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>31</day>
        <month>10</month>
        <year>2022</year>
      </pub-date>
      <volume>10</volume>
      <issue>10</issue>
      <elocation-id>e38640</elocation-id>
      <history>
        <date date-type="received">
          <day>25</day>
          <month>4</month>
          <year>2022</year>
        </date>
        <date date-type="rev-request">
          <day>16</day>
          <month>5</month>
          <year>2022</year>
        </date>
        <date date-type="rev-recd">
          <day>11</day>
          <month>7</month>
          <year>2022</year>
        </date>
        <date date-type="accepted">
          <day>11</day>
          <month>8</month>
          <year>2022</year>
        </date>
      </history>
      <copyright-statement>©Changgyun Kim, Hogul Jeong, Wonse Park, Donghyun Kim. Originally published in JMIR Medical Informatics (https://medinform.jmir.org), 31.10.2022.</copyright-statement>
      <copyright-year>2022</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Informatics, is properly cited. The complete bibliographic information, a link to the original publication on https://medinform.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://medinform.jmir.org/2022/10/e38640" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Early detection of tooth-related diseases in patients plays a key role in maintaining their dental health and preventing future complications. Since dentists are not overly attentive to tooth-related diseases that may be difficult to judge visually, many patients miss timely treatment. The 5 representative tooth-related diseases, that is, coronal caries or defect, proximal caries, cervical caries or abrasion, periapical radiolucency, and residual root, can be detected on panoramic images. In this study, a web service was constructed for the detection of these diseases on panoramic images in real time, which helped shorten the treatment planning time and reduce the probability of misdiagnosis.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study designed a model to assess tooth-related diseases in panoramic images by using artificial intelligence in real time. This model can perform an auxiliary role in the diagnosis of tooth-related diseases by dentists and reduce the treatment planning time spent through telemedicine.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>For learning the 5 tooth-related diseases, 10,000 panoramic images were modeled: 4206 coronal caries or defects, 4478 proximal caries, 6920 cervical caries or abrasion, 8290 periapical radiolucencies, and 1446 residual roots. To learn the model, the fast region-based convolutional network (Fast R-CNN), residual neural network (ResNet), and inception models were used. Learning about the 5 tooth-related diseases completely did not provide accurate information on the diseases because of indistinct features present in the panoramic pictures. Therefore, 1 detection model was applied to each tooth-related disease, and the models for each of the diseases were integrated to increase accuracy.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The Fast R-CNN model showed the highest accuracy, with an accuracy of over 90%, in diagnosing the 5 tooth-related diseases. Thus, Fast R-CNN was selected as the final judgment model as it facilitated the real-time diagnosis of dental diseases that are difficult to judge visually from radiographs and images, thereby assisting the dentists in their treatment plans.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>The Fast R-CNN model showed the highest accuracy in the real-time diagnosis of dental diseases and can therefore play an auxiliary role in shortening the treatment planning time after the dentists diagnose the tooth-related disease. In addition, by updating the captured panoramic images of patients on the web service developed in this study, we are looking forward to increasing the accuracy of diagnosing these 5 tooth-related diseases. The dental diagnosis system in this study takes 2 minutes for diagnosing 5 diseases in 1 panoramic image. Therefore, this system plays an effective role in setting a dental treatment schedule.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>object detection</kwd>
        <kwd>tooth</kwd>
        <kwd>diagnosis</kwd>
        <kwd>panorama</kwd>
        <kwd>dentistry</kwd>
        <kwd>dental health</kwd>
        <kwd>oral health</kwd>
        <kwd>dental caries</kwd>
        <kwd>image analysis</kwd>
        <kwd>artificial intelligence</kwd>
        <kwd>detection model</kwd>
        <kwd>machine learning</kwd>
        <kwd>automation</kwd>
        <kwd>diagnosis system</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Usage of Medical Data and Artificial Intelligence in Health Care</title>
        <p>Several recent studies [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref3">3</xref>] have used various medical data for eHealth care, but they are merely adding digital and network functions to the existing medical equipment, and remote services included in the treatment are unused. In addition, although eHealth care processes medical data and information through the networking function of doctors and patients, in reality, patients cannot obtain and confirm much information. Although a large amount of medical data has been accumulated, there has been a limit to using these data to provide information to patients and find new practical implications. As the importance of medical data has increased, a clinical data warehouse has been established to research how to utilize various medical data and for patients to find medical information easily through the provision of public and private medical data [<xref ref-type="bibr" rid="ref4">4</xref>]. Various studies on the application of big data and artificial intelligence (AI) in medicine have shown that the University of North Carolina Healthcare has dramatically reduced the time and effort of medical staff by performing unstructured medical data analysis using content analytics and natural language processing and automatically extracting abnormal parts by machine reading and automatic processing algorithms in mammography screenings and pap smears [<xref ref-type="bibr" rid="ref5">5</xref>]. Patients’ conditions are diagnosed remotely after the initial treatment by clinical professionals providing them with the medical information to manage their disease [<xref ref-type="bibr" rid="ref6">6</xref>]. Recently, a method that allows users to easily use various medical data based on their experiences and help them make decisions through optimal information delivery when applying it to medical systems has been studied [<xref ref-type="bibr" rid="ref7">7</xref>]. 
Using medical data and AI, patients can prevent diseases in advance and increase their autonomy in treatment scheduling by receiving knowledge of their condition and medical information. In addition, AI using medical data can reduce medical time and cost by assisting doctors in treatment.</p>
      </sec>
      <sec>
        <title>Dental Caries Diagnosis Using Images</title>
        <p>Dental caries is diagnosed using videos and radiographs, and studies [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>] have shown the processing of videos and images for a more accurate diagnosis of dental caries. In 2003, Møystad et al [<xref ref-type="bibr" rid="ref10">10</xref>] diagnosed dental caries by using pre-enhanced Digora storage phosphor images while performing radiography on areas where tooth decay occurred and where panorama X-ray and computed tomography systems (Soredex Medical Systems) could not be used because of territorial issues. In 2017, Veena Divya et al [<xref ref-type="bibr" rid="ref11">11</xref>] diagnosed dental caries by using the contrast map of a panoramic image, controlling the contrast of the bright and dark parts to make the blurred panoramic image clear. In the same year, Singh and Sehgal [<xref ref-type="bibr" rid="ref12">12</xref>] added light contrast to panoramic images to enhance the clarity and diagnose dental caries by exploring the dark areas, which corresponded to dental caries in the images. In 2019, Kale et al [<xref ref-type="bibr" rid="ref13">13</xref>] showed that mothers were able to diagnose dental caries in photos of normal and decayed teeth obtained with a smartphone by using an atlas. In 2020, the Laplacian filtering backpropagation algorithm was used to learn and diagnose dental caries [<xref ref-type="bibr" rid="ref14">14</xref>]. In 2021, Bayraktar and Ayan [<xref ref-type="bibr" rid="ref15">15</xref>] diagnosed dental caries by using image deep learning algorithms; that study used 1000 radiographic teeth data points for learning and validation. Labeling the dental caries was performed by a professional dentist, and dental caries in the premolars and molars were examined [<xref ref-type="bibr" rid="ref15">15</xref>].</p>
      </sec>
      <sec>
        <title>Importance of Dental Caries Diagnosis</title>
        <p>Dental caries is one of the most common chronic diseases worldwide. Oral diseases are recognized as serious diseases like other systemic diseases and were classified by the World Health Organization in 2011 as serious noncommunicable diseases. The teeth are one of the most important organs in the body, and dental caries is one of the biggest causes of tooth disease [<xref ref-type="bibr" rid="ref16">16</xref>]. Dental caries develop and progress in 4 stages, starting as a tiny black spot in stage 1, followed by enamel decay in stage 2, nerve damage in stage 3, and pulp damage and pus and inflammation in stage 4. Dental caries can be easily repaired with simple treatment in stages 1 and 2; however, if the initial stages 1 and 2 are not judged or are overlooked, dental caries progress to stages 3 and 4. This leads to complications such as toothache, inflammation, and acute osteomyelitis, which destroys the bones around the teeth. Therefore, it is important to prevent and manage dental caries. The management and early removal of dental caries through an initial diagnosis are essential factors for good dental health [<xref ref-type="bibr" rid="ref17">17</xref>]. However, if there are no clinical symptoms in the early stages of dental caries, people often do not pay much attention. In addition, since dental treatment is generally performed to promptly resolve uncomfortable areas, dentists can also pass over without diagnosing any of the following: proximal caries, which occurs between teeth; periapical radiolucency, which occurs from the root apex; and residual root in the bone. Therefore, to solve this problem, AI can help dentists diagnose early dental caries and other tooth-related diseases that may be difficult to judge visually by using panoramic images. 
Through this system, dentists and patients can reduce treatment planning time and easily treat tooth problems before they worsen, and patients can identify problems with their teeth and improve their quality of life by preventing diseases that could occur in the future.</p>
        <p>Although various simple and easy AI diagnostic methods in the dental field have been studied, there are limitations [<xref ref-type="bibr" rid="ref18">18</xref>] in diagnosing dental caries accurately in tooth sections. Since previous models have been used for diagnosing dental caries in the entire tooth, there are limitations in diagnosing dental caries that require precise diagnosis, such as proximal and root caries. This study aims to learn and diagnose 5 tooth-related diseases (ie, coronal caries or defects, proximal caries, cervical caries or abrasion, periapical radiolucency, and residual root) by using image deep learning models, which can assist dentists’ diagnosis by reducing treatment planning time.</p>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Data Collection</title>
        <p>Since this study evaluated 5 tooth-related diseases (ie, coronal caries or defects, proximal caries, cervical caries or abrasion, periapical radiolucency, and residual root), which are commonly diagnosed using dental imaging, training data were generated by collecting and labeling panoramic images with tooth-related diseases. This study used panoramic images provided by 50 dental clinics from 2001 to 2021. Data from 30 dental hospitals in Korea were collected, anonymized, and used for this study. Among the anonymized records, there were 3702 males, 3783 females, and 2515 persons whose gender could not be identified. Population distribution by age group did not include teenagers; there were 1721 persons in their 20s, 956 persons in their 30s, 1134 persons in their 40s, 1351 persons in their 50s, 1914 persons in their 60s and older, and 2934 persons whose age was unknown.</p>
        <p>A total of 10,000 panoramic images with one or more of the following 5 tooth-related diseases were used for labeling: 4206 images of coronal caries or defects, 4478 images of proximal caries, 6920 images of cervical caries or abrasion, 8290 images of periapical radiolucency, and 1446 images of residual roots. As shown in <xref rid="figure1" ref-type="fig">Figure 1</xref> and <xref ref-type="table" rid="table1">Table 1</xref>, coronal caries or defects showed defects or radiolucencies that lacked density compared to the normal in the coronal portion of the tooth, proximal caries showed radiolucency that lacked density compared to the normal in the adjacent surfaces between teeth, and cervical caries or abrasion showed radiolucency that lacked density compared to the normal in the cervical area of the tooth. In addition, periapical radiolucency showed a lower density than normal radiolucency in the periapical area, and residual root means that the coronal portion is completely lost and only the root portion remains. Each label was created by focusing on these findings on the panoramic images. We used 10,000 images of male and female Koreans to label each tooth-related disease. Radiologic specialists with over 20 years of dental imaging experience performed the labeling. It took 2 minutes on average for the radiologic specialists to read the 5 diseases presented in <xref ref-type="table" rid="table1">Table 1</xref> on 1 panoramic image of the tooth, and it took approximately 6 hours on average to read 100 images, including the break time. Therefore, it took approximately 50 days to label 10,000 samples. <xref ref-type="table" rid="table1">Table 1</xref> shows the standards agreed upon by the graders. This standard is presented in Oral Radiology: Principles and Interpretation [<xref ref-type="bibr" rid="ref19">19</xref>].</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Findings of each tooth-related disease (ie, coronal caries or defect, proximal caries, cervical caries or abrasion, periapical radiolucency, residual root, in clockwise order from the top left).</p>
          </caption>
          <graphic xlink:href="medinform_v10i10e38640_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Findings of each tooth-related disease.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="340"/>
            <col width="660"/>
            <thead>
              <tr valign="top">
                <td>Tooth-related diseases</td>
                <td>Findings</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Coronal caries or defect</td>
                <td>Defect or radiolucency that lacks density compared to normal in the coronal portion of a tooth</td>
              </tr>
              <tr valign="top">
                <td>Proximal caries</td>
                <td>Radiolucency that lacks density compared to normal in the adjacent surfaces between teeth</td>
              </tr>
              <tr valign="top">
                <td>Cervical caries or abrasion</td>
                <td>Radiolucency that lacks density compared to normal in the cervical area of the tooth</td>
              </tr>
              <tr valign="top">
                <td>Periapical radiolucency</td>
                <td>Low density compared to normal in the periapical area of tooth</td>
              </tr>
              <tr valign="top">
                <td>Residual root</td>
                <td>Coronal portion of tooth is completely lost and only the root portion remains</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Learning Model (Designing and Training the Model)</title>
        <p>Labeling was performed by data collection and preprocessing, and thus, an image classification model was used to learn about each of the 5 tooth-related diseases. This study learned dental diseases by using fast region-based convolutional network (Fast R-CNN), residual neural network (ResNet), and inception. The model with the highest accuracy in disease detection was selected. For training the model, 10,000 panoramic images were modeled in total: 4206 coronal caries or defects, 4478 proximal caries, 6920 cervical caries or abrasion, 8290 periapical radiolucency, and 1446 residual roots.</p>
      </sec>
      <sec>
        <title>Model Used in This Study (Additional Case of Model Application)</title>
        <p>Fast R-CNN has increased accuracy compared to the existing object detection algorithms because it extracts the image features and minimizes the noise in image analysis. Fast R-CNN consists of a convolution feature map and a region of interest feature vector [<xref ref-type="bibr" rid="ref20">20</xref>]. The convolution feature map delivers the image to the convolution and max-pooling layers, and the received information is placed as a feature in the region of interest. Thereafter, the feature vector map is converted into a map with various features, and the object value of the object image of class K is determined by moving to the fully connected layers [<xref ref-type="bibr" rid="ref21">21</xref>]. In this process, the multi-task loss is minimized, and the learning accuracy is improved by using a loss function. Learning multiple classes of tooth-related diseases in 1 Fast R-CNN model sometimes results in errors in the detection of panoramic images with dark areas, as shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>. Therefore, this study applies a single class to 1 Fast R-CNN model instead of multiple classes to improve the accuracy of detecting tooth-related diseases.</p>
        <p>For image reading, a rectangular bounding box was first used, and segmentation was performed through an algorithm based on about 500 segmentation data. In the case of segmentation, accuracy was not calculated for the segmented data because it was used only for grasping the approximate accuracy. Thereafter, the coordinate values of the box-type tooth classes that are multilabeled in 1 tooth panoramic image were derived. Each disease corresponding to the derived coordinate value was classified by class. Then, each of the 5 tooth classes was applied to learning through the box coordinate values having the corresponding dental disease on the panoramic image. Through this, the input value for 1 model was constructed using the panoramic image data of 1 class and the box coordinate values corresponding to dental diseases. As shown in <xref rid="figure3" ref-type="fig">Figure 3</xref>, a bounding box was designated for each tooth-related disease, and the classes for each tooth-related disease were defined.</p>
        <p>ResNet derives a value through the weight layer to solve the problem of overfitting owing to increased dimensional depth in deep learning, which adds the result learned through the previous weight layer to the activation function and delivers it to the next layer [<xref ref-type="bibr" rid="ref22">22</xref>]. Therefore, this learning method, even if the depth of the learning layer deepens, solves the overfitting problem because important weights can be used for the next learning without forgetting the past learning results [<xref ref-type="bibr" rid="ref23">23</xref>]. Because of these advantages, in this study, deep-layer learning is required to derive detailed results in learning panoramic images with similar image characteristics, and the ResNet model that can learn such a model was selected.</p>
        <p>Inception, like ResNet, is created to solve the overfitting problem and the increase in computational traffic through a lot of learning when the size of the model is increased by increasing the depth of the layer [<xref ref-type="bibr" rid="ref24">24</xref>]. In the inception model, it is possible to derive results in a fast learning time by using a small number of calculations, even in a model with a complex structure, by connecting only nodes with a high relationship between each node [<xref ref-type="bibr" rid="ref25">25</xref>]. In addition, using various convolution filters, we derived a model that can make optimal judgments based on the features derived from each filter. This study evaluated 5 tooth-related diseases by using 3 models: Fast R-CNN, ResNet, and inception. To increase the detection accuracy for 5 tooth-related diseases, a model was designed through a process shown in <xref rid="figure4" ref-type="fig">Figure 4</xref> (additional model), and the 5 tooth-related diseases were learned through Fast R-CNN, ResNet, and inception. In learning tooth-related disease data (the result of the additional model), the 3 models provided good performance for multi-class learning. However, for each part of the panoramic image composed of the contrast ratio of white and black, if multiple classes are learned in one detection model for tooth-related diseases that have similar characteristics but different sizes, there were cases where the black background was detected as a tooth-related disease. As the learning proceeded by inputting data for 5 tooth-related diseases as a whole, more black screens were learned, and the results are shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>. As shown in the box in <xref rid="figure2" ref-type="fig">Figure 2</xref>, there are cases where areas such as the background of other panoramic images that are not included in the teeth are detected. 
To solve the problem of multi-class learning, as shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>, professional reading experts labeled 10,000 images in a bounding box form with 5 dental diseases in a single tooth image and finally converted it into the CSV format. Label information corresponding to each dental disease was extracted from the data set containing the labeling information of 5 dental diseases, and each data set was derived for each of the 5 dental diseases. Therefore, 5 CSV-format data sets that were composed of panoramic images were modeled in total: 4206 coronal caries or defects, 4478 proximal caries, 6920 cervical caries or abrasion, 8290 periapical radiolucency, and 1446 residual roots. Further, depending on the model, DICOM (digital imaging and communications in medicine) to BMP (bitmap) conversion was performed, and auto brightness correction and adjustment were partially performed.</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Case of misclassification of tooth-related diseases. The green boxes represent detected areas that are not included in the teeth.</p>
          </caption>
          <graphic xlink:href="medinform_v10i10e38640_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Bounding box for each tooth-related disease.</p>
          </caption>
          <graphic xlink:href="medinform_v10i10e38640_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>Integrated detection system for the learning process. DICOM: digital imaging and communications in medicine.</p>
          </caption>
          <graphic xlink:href="medinform_v10i10e38640_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Development of an Integrated Detection System for Tooth-Related Diseases</title>
        <p>While learning about the 5 tooth-related diseases through Fast R-CNN, ResNet, and inception, problems, as shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>, appeared. To solve these problems, a single model was applied to 1 class of tooth-related diseases to create a training model for each of the 5 tooth-related diseases so that varying locations and sizes of the diseases could be detected in detail.</p>
        <p>Based on the process shown in <xref rid="figure4" ref-type="fig">Figure 4</xref>, 5 tooth-related diseases were learned, and dentists and experts designed a real-time diagnosis, as shown in <xref rid="figure5" ref-type="fig">Figure 5</xref>. In designing the process, the service and administrator web were implemented using Python (version 3.6)-based Flask [<xref ref-type="bibr" rid="ref26">26</xref>] engines (version 1.0.0), and the web page configuration was implemented using Jinja template-based HTML and Vanilla JavaScript [<xref ref-type="bibr" rid="ref27">27</xref>]. The communication part of the AI application programming interface was composed of a Python-based Flask engine, which was installed within the Flask engine through model learning using TensorFlow 2.0.0 (Google Brain Team). Additionally, the image data of the database server were divided into file name, photographing date, patient name, patient age, image labeling prediction model data, and image labeling correct answer data to assist the dentist in the diagnosis. In the form of training/validation/test, splits were first performed and then labeled. A total of 6000 pieces were used for training, 2000 pieces were used for validation, and the remaining 2000 pieces were used for test splitting. In fact, we used ResNet/inception as the backbone of Fast R-CNN. As the input value for one model, learning data were constructed using the panoramic image data of one class and the box coordinate values corresponding to dental diseases.</p>
        <p>In the training layer structure of each model of Fast R-CNN, ResNet, and inception, looking at the structure of the Fast R-CNN model (region proposal → CNN classification → region of interest projection), region of interest projection and bounding box regression were performed through region of interest pooling. The model is configured as shown in <xref rid="figure5" ref-type="fig">Figure 5</xref>, and 300 range boxes for each dental disease were specified using the CNN model in the region proposal for dental disease detection, and the features of the range corresponding to a specific class were identified. At this time, after converting features of fixed sizes in the region of interest pooling layer into a feature map, a feature vector was generated with a fully connected layer corresponding to each feature. Then, for each feature, the position of the corresponding class was predicted using SoftMax and Bbox regressor. The epoch of model training was performed 100,000 times, and the learning rate was set to 0.001.</p>
        <p>ResNet improves the accuracy by reducing the depth of the learning layer and increasing the performance compared to the CNN model, which is an existing image analysis model, through residual learning. In order to increase the learning accuracy in general CNNs, many layers are stacked. However, such a deep layer can lower the accuracy of the learning model. When learning through residual learning, the positive error rate can be lowered even when learning in a deep layer. When ResNet derives a value from the weight layer through the activation function in the convolution operation, it imports the previously learned information as it is, as shown in <xref rid="figure5" ref-type="fig">Figure 5</xref>, and learns the residual information, F(x). Looking at the formula, when the input value x is input, it is multiplied by the first weight value, passed through the activation function, and then multiplied by the second weight value. At this time, the identity, that is, the x value, is additionally added. Therefore, since the result is derived through continuous repetition of this process, y is derived by adding a multiple convolutional layer F(x,{W<sub>i</sub>}) and short connection W<sub>s</sub>x, which takes the existing input value as it is, to the result value.</p>
        <disp-formula>
          <italic>y = F(x,{W<sub>i</sub>}) + W<sub>s</sub>x</italic>
        </disp-formula>
        <p>In this way, by adding information to the result derived from the weight layer, information can be added and computational complexity can be reduced so that a model with faster learning and better performance can be derived. Since ResNet uses 1 model for each of the 5 dental diseases (1 disease per model), 50 hidden layers of each dental disease were designated for learning. For training, like Fast R-CNN, the training epoch was performed 100,000 times, and the learning rate was set to 0.001.</p>
        <p>The inception model connects the highly correlated nodes when the correlation between each node is high in the fully connected architecture and does not connect the rest so that N clusters are created for each feature. When creating a connected architecture, we additionally convolve features that are far from each other through filters of various sizes for nonuniform and inefficient sparse structures and reduce the number of channels by using a 1 × 1 filter for nodes with high correlation. The inception model was constructed, as shown in <xref rid="figure6" ref-type="fig">Figure 6</xref>. For the model configuration, a dental disease detection model was built using 10 pooling layers. The training epoch was performed 100,000 times, and the learning rate was set to 0.001. When a list of images is received from a computer connected to the X-ray equipment and the data are stored in the server database, a separate image is retransmitted to a system that is requested to be read from the stored data. Thereafter, it provides information read through a detection model for tooth-related diseases in real time so that it can assist dentists in shortening the reading time.</p>
        <p>The overall flow diagram is shown in <xref rid="figure6" ref-type="fig">Figure 6</xref> and is divided into service, manager, and AI algorithm categories. In the service web, the data for each tooth-related disease previously labeled by experts and the updated panoramic images are continuously accumulated and provided to the server. In the manager app, the accumulated data are transmitted to the server, and the transmitted panoramic image is read by dental experts to determine the tooth-related disease. Then, the analysis data are collected through labeling, and the collected data are used to derive the result by using an AI algorithm.</p>
        <p>Based on the process shown in <xref rid="figure5" ref-type="fig">Figure 5</xref>, the detailed process of the tooth-related disease determination system proposed in this study was constructed, and it can be divided into 3 parts (service, system, and personal computer). The service part is designed to receive panoramic image data and read information through the website, and the messaging system is designed for users to communicate through the channel talk application programming interface [<xref ref-type="bibr" rid="ref23">23</xref>]. The information provided to the readers was labeled so that the AI model could be learned, and it was designed to enable continuous data updates. In addition, when the labeling result was applied to the AI model and the AI result was judged again by the reader, it was updated to Case 1 if it was correctly judged and to Case 2 when the judgment was incorrect. Therefore, after being read accurately again by the reader, the accuracy of the model was improved through continuous data updates with the AI server. In the system, a server was built to enable the website of the service part to work. The server was built based on Flask, and it was largely divided into the presentation, business, and persistence layers [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]. The server connects the user and client systems through 3 layers and enables the movement of data in the database. The database was designed using MongoDB [<xref ref-type="bibr" rid="ref30">30</xref>], which can quickly operate various types of data. AI, chatting, image, and message servers were built into MongoDB to increase the real-time movement speed of the data. 
The AI server, which plays a role in providing tooth-related disease reading results, updates the results of expert reading provided by doctors and provides the doctor with tooth-related disease results on new images to improve accuracy through mutual feedback, which helps users to understand by providing feedback on the opinions of users on the personal computer. Finally, it stores the dental panoramic image provided through the image server or provides medical information to personal computer users so that they can view and continuously manage the medical records whenever necessary.</p>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>Flow diagram of the learning process. AI: artificial intelligence; PC: personal computer.</p>
          </caption>
          <graphic xlink:href="medinform_v10i10e38640_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure6" position="float">
          <label>Figure 6</label>
          <caption>
            <p>Schematic diagram of a detection system for tooth-related diseases. AI: artificial intelligence; API: application programming interface; DB: database; PACS: picture archiving and communication system; PC: personal computer.</p>
          </caption>
          <graphic xlink:href="medinform_v10i10e38640_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Ethical Considerations</title>
        <p>Since this was a retrospective study, the data were processed to protect personal information through measures such as database anonymization. In addition, the data were collected for research purposes with the approval of the Cheongju University Bioethics Committee (IRB 1041107-202208-HR-024-01).</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Detection System</title>
        <p>The web service in this study was built based on the process presented in <xref rid="figure6" ref-type="fig">Figure 6</xref> and <xref rid="figure7" ref-type="fig">Figure 7</xref>. As shown in <xref rid="figure7" ref-type="fig">Figure 7</xref>, panoramic images facilitate faster judgment of dental-related diseases than the conventional doctor’s diagnosis techniques. The detection system aids and shortens the treatment time through the transmission of images taken in real time. <xref rid="figure7" ref-type="fig">Figure 7</xref> shows a case in which a tooth disease was correctly judged and another case in which a dental disease was incorrectly judged. Since cases can be judged inaccurately, doctors can use this auxiliary system to check the patient’s condition once again. <xref rid="figure8" ref-type="fig">Figure 8</xref> shows that patients can check their panoramic images on the web, and they can know about the treatment plan and receive information on tooth-related diseases for effective disease management.</p>
        <fig id="figure7" position="float">
          <label>Figure 7</label>
          <caption>
            <p>Success and fail cases in the detection of the 5 dental diseases.</p>
          </caption>
          <graphic xlink:href="medinform_v10i10e38640_fig7.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure8" position="float">
          <label>Figure 8</label>
          <caption>
            <p>Web system for tooth-related diseases.</p>
          </caption>
          <graphic xlink:href="medinform_v10i10e38640_fig8.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Benefits of Using Web-Based Systems and User Interface</title>
        <p>To detect tooth-related diseases, Fast R-CNN, which has the best performance among the classification models from the point of view of a dentist, was applied. For learning the model for judging the 5 types of tooth-related diseases, dentists can update the panoramic images in real time through the web application programming interface and continuously collect data by improving the accuracy through additional updates on the tooth-related disease detection labeling. <xref rid="figure6" ref-type="fig">Figure 6</xref> shows how the tooth-related disease judgment web service screen appears. <xref rid="figure6" ref-type="fig">Figure 6</xref> shows that by providing doctors and patients with the diagnosis of their diseases through the patient’s panoramic image, past medical records, and current status on the web, doctors can provide prompt treatment for dental diseases and patients can monitor their dental status. Therefore, from the patient’s perspective, users can check medical records and treatment areas through the web screen of the panoramic image provided by the hospital where they have been treated and check for tooth-related diseases. In addition, because the treatment time and the subsequent treatment times can be known, users can use this system to manage their tooth-related diseases, which require continuous management.</p>
      </sec>
      <sec>
        <title>Model Comparison Results</title>
        <p>This study created a detection model for 5 dental diseases that are difficult to judge visually (ie, coronal caries or defect, proximal caries, cervical caries or abrasion, periapical radiolucency, and residual root) by using a dental panoramic image. Fast R-CNN, ResNet, and inception have previously been used to learn about dental disease detection [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref31">31</xref>]. In training the model, 4206 cases of coronal caries or defects, 4478 cases of proximal caries, 6920 cases of cervical caries or abrasion, 8290 cases of periapical radiolucency, and 1446 cases of residual roots were trained among a total of 10,000 panoramic images. Therefore, a model for judging the 5 types of dental caries using 1 panoramic image was developed by creating a training model for each dental disease into one detection model through an integrated detection system for dental diseases. Regarding the number of training sessions, all 3 models were trained 200,000 times, the results were compared, and the model with the highest accuracy was selected. The results of deriving the precision, sensitivity, and specificity of the detection results for the 5 dental diseases are shown in <xref rid="figure8" ref-type="fig">Figure 8</xref>. As shown in <xref rid="figure8" ref-type="fig">Figure 8</xref>, the coronal defect showed the highest specificity, with an average specificity of 0.90 or more. In addition, the sensitivity was found to be above 0.80 on average, indicating that it would show high accuracy even when other data were used for learning.</p>
        <p><xref ref-type="table" rid="table2">Table 2</xref> shows the results of learning with Fast R-CNN, ResNet, and inception for the 5 tooth-related diseases. As shown in <xref ref-type="table" rid="table2">Table 2</xref>, 5 tooth-related diseases were detected with an average accuracy of over 90%. Also, as shown in <xref rid="figure6" ref-type="fig">Figure 6</xref>, the specificity is the highest for the 5 tooth-related diseases. This means that each tooth-related disease can be detected with high accuracy. With the tooth-related disease detection web service presented in this study, considerable time can be saved in diagnosing tooth-related diseases. On average, it takes about 1 minute for dental doctors to judge 5 dental diseases on 1 panoramic image. However, if the system proposed in this study is used, the results of the classification model can be judged at once through the user interface, and the time can be reduced to about 10 seconds in judging dental diseases. Therefore, it is judged to be an effective system to assist in the judgment of dental diseases.</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Tooth-related disease detection results.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="400"/>
            <col width="190"/>
            <col width="190"/>
            <col width="190"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Model, diseases</td>
                <td>Precision</td>
                <td>Sensitivity</td>
                <td>Specificity</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="5">
                  <bold>Fast region-based convolutional network</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Coronal caries or defect</td>
                <td>0.785</td>
                <td>0.708</td>
                <td>0.982</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Proximal caries</td>
                <td>0.484</td>
                <td>0.792</td>
                <td>0.918</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Cervical caries or abrasion</td>
                <td>0.795</td>
                <td>0.767</td>
                <td>0.952</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Periapical radiolucency</td>
                <td>0.824</td>
                <td>0.953</td>
                <td>0.895</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Residual root</td>
                <td>0.640</td>
                <td>0.904</td>
                <td>0.972</td>
              </tr>
              <tr valign="top">
                <td colspan="5">
                  <bold>Inception</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Coronal caries or defect</td>
                <td>0.253</td>
                <td>0.609</td>
                <td>0.848</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Proximal caries</td>
                <td>0.327</td>
                <td>0.783</td>
                <td>0.883</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Cervical caries or abrasion</td>
                <td>0.444</td>
                <td>0.707</td>
                <td>0.785</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Periapical radiolucency</td>
                <td>0.371</td>
                <td>0.946</td>
                <td>0.556</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Residual root</td>
                <td>0.232</td>
                <td>0.893</td>
                <td>0.873</td>
              </tr>
              <tr valign="top">
                <td colspan="5">
                  <bold>Residual neural network</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Coronal caries or defect</td>
                <td>0.2101</td>
                <td>0.395</td>
                <td>0.876</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Proximal caries</td>
                <td>0.685</td>
                <td>0.377</td>
                <td>0.987</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Cervical caries or abrasion</td>
                <td>0.378</td>
                <td>0.011</td>
                <td>0.996</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Periapical radiolucency</td>
                <td>0.308</td>
                <td>0.883</td>
                <td>0.451</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Residual root</td>
                <td>0.225</td>
                <td>0.744</td>
                <td>0.89</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Strengths and Limitations</title>
        <p>This study has several advantages. The use of panoramic images of individual patients in dentistry is a complex procedure. This study designed a model that could determine 5 types of dental caries by acquiring various panoramic image data and collecting 10,000 pieces of data with various oral structures and dental caries. Therefore, a tooth-related disease determination system with high accuracy and without complex procedures was developed. However, since there is a large deviation in the number of classes for each tooth-related disease, there was a problem in that the learning accuracy was slightly lowered where the number of analysis groups was small. The accuracy of the model is expected to be improved by collecting and supplementing data through continuous updates by using real-time panoramic images uploaded to the web.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>In this study, the tooth-related disease judgment system identified 5 types of tooth-related diseases that are difficult to determine clinically (visually) by using an AI model, and this information was provided on the web to create a system that allows doctors and patients to make real-time judgments. The trained model labeled 5 dental caries through 10,000 panoramic images. Accuracy was compared using Fast R-CNN, ResNet, and inception models, which are good models for detection. Among these models, Fast R-CNN was finally used, which has the highest accuracy. Therefore, Fast R-CNN can be used to shorten the time required for the diagnosis and treatment of dental caries. In addition, by updating the captured panoramic images of patients on the web service developed in this study, the system can acquire new data and further increase the accuracy of diagnosing tooth-related diseases. Additionally, the patient can be aware of the tooth areas where he or she has received treatment, the treatment time, and the type of caries, so that he or she can adjust the schedule for the future dental visit, which will aid in continuous management of dental health. Thus, this study is meaningful as it collects learning data from cases embodied as actual services and implements a prototype-type service based on the collected data. In the future, it will be possible to develop a model for predicting overall oral diseases with panoramic images through additional learning of various dental diseases.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">Fast R-CNN</term>
          <def>
            <p>fast region-based convolutional network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">ResNet</term>
          <def>
            <p>residual neural network</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dash</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Shakyawar</surname>
              <given-names>Sk</given-names>
            </name>
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kaushik</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Big data in healthcare: management, analysis and future prospects</article-title>
          <source>J Big Data</source>
          <year>2019</year>
          <month>6</month>
          <day>19</day>
          <volume>6</volume>
          <issue>1</issue>
          <fpage>54</fpage>
          <pub-id pub-id-type="doi">10.1186/s40537-019-0217-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Gong</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Ye</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Dou</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Private and Secured Medical Data Transmission and Analysis for Wireless Sensing Healthcare System</article-title>
          <source>IEEE Trans. Ind. Inf</source>
          <year>2017</year>
          <month>6</month>
          <volume>13</volume>
          <issue>3</issue>
          <fpage>1227</fpage>
          <lpage>1237</lpage>
          <pub-id pub-id-type="doi">10.1109/TII.2017.2687618</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pirbhulal</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Samuel</surname>
              <given-names>OW</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Sangaiah</surname>
              <given-names>AK</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>A joint resource-aware and medical data security framework for wearable healthcare systems</article-title>
          <source>Future Generation Computer Systems</source>
          <year>2019</year>
          <month>06</month>
          <volume>95</volume>
          <fpage>382</fpage>
          <lpage>391</lpage>
          <pub-id pub-id-type="doi">10.1016/j.future.2019.01.008</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Garcelon</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Neuraz</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Benoit</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Salomon</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Kracker</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Suarez</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Bahi-Buisson</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Hadj-Rabia</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Fischer</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Munnich</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Burgun</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Finding patients using similarity measures in a rare diseases-oriented clinical data warehouse: Dr. Warehouse and the needle in the needle stack</article-title>
          <source>J Biomed Inform</source>
          <year>2017</year>
          <month>09</month>
          <volume>73</volume>
          <fpage>51</fpage>
          <lpage>61</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1532-0464(17)30176-4"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jbi.2017.07.016</pub-id>
          <pub-id pub-id-type="medline">28754522</pub-id>
          <pub-id pub-id-type="pii">S1532-0464(17)30176-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Seong</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Song</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Assessment of innovation policy coordination through the Korean Office of Science, Technology and Innovation</article-title>
          <source>Korea Science</source>
          <year>2013</year>
          <access-date>2022-10-12</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://koreascience.or.kr/article/JAKO201354447932240.jsp%3Fkj=KJKHCF&#38;py=2004&#38;vnc=v7n4&#38;sp=251">http://koreascience.or.kr/article/JAKO201354447932240.jsp%3Fkj=KJKHCF&#38;py= 2004&#38;vnc=v7n4&#38;sp=251</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dixon</surname>
              <given-names>PM</given-names>
            </name>
            <name name-style="western">
              <surname>Tremaine</surname>
              <given-names>WH</given-names>
            </name>
            <name name-style="western">
              <surname>Pickles</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kuhns</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Hawe</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>McCann</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>McGorum</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Railton</surname>
              <given-names>DI</given-names>
            </name>
            <name name-style="western">
              <surname>Brammer</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Equine dental disease part 1: a long-term study of 400 cases: disorders of incisor, canine and first premolar teeth</article-title>
          <source>Equine Vet J</source>
          <year>1999</year>
          <month>09</month>
          <volume>31</volume>
          <issue>5</issue>
          <fpage>369</fpage>
          <lpage>77</lpage>
          <pub-id pub-id-type="doi">10.1111/j.2042-3306.1999.tb03835.x</pub-id>
          <pub-id pub-id-type="medline">10505951</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Barbazza</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Klazinga</surname>
              <given-names>NS</given-names>
            </name>
            <name name-style="western">
              <surname>Kringos</surname>
              <given-names>DS</given-names>
            </name>
          </person-group>
          <article-title>Exploring the actionability of healthcare performance indicators for quality of care: a qualitative analysis of the literature, expert opinion and user experience</article-title>
          <source>BMJ Qual Saf</source>
          <year>2021</year>
          <month>12</month>
          <volume>30</volume>
          <issue>12</issue>
          <fpage>1010</fpage>
          <lpage>1020</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://qualitysafety.bmj.com/lookup/pmidlookup?view=long&#38;pmid=33963072"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bmjqs-2020-011247</pub-id>
          <pub-id pub-id-type="medline">33963072</pub-id>
          <pub-id pub-id-type="pii">bmjqs-2020-011247</pub-id>
          <pub-id pub-id-type="pmcid">PMC8606459</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Prados-Privado</surname>
              <given-names>María</given-names>
            </name>
            <name name-style="western">
              <surname>García Villalón</surname>
              <given-names>Javier</given-names>
            </name>
            <name name-style="western">
              <surname>Martínez-Martínez</surname>
              <given-names>Carlos Hugo</given-names>
            </name>
            <name name-style="western">
              <surname>Ivorra</surname>
              <given-names>Carlos</given-names>
            </name>
            <name name-style="western">
              <surname>Prados-Frutos</surname>
              <given-names>Juan Carlos</given-names>
            </name>
          </person-group>
          <article-title>Dental Caries Diagnosis and Detection Using Neural Networks: A Systematic Review</article-title>
          <source>J Clin Med</source>
          <year>2020</year>
          <month>11</month>
          <day>06</day>
          <volume>9</volume>
          <issue>11</issue>
          <fpage>3579</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=jcm9113579"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/jcm9113579</pub-id>
          <pub-id pub-id-type="medline">33172056</pub-id>
          <pub-id pub-id-type="pii">jcm9113579</pub-id>
          <pub-id pub-id-type="pmcid">PMC7694692</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>Jae-Hong</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>Do-Hyung</given-names>
            </name>
            <name name-style="western">
              <surname>Jeong</surname>
              <given-names>Seong-Nyum</given-names>
            </name>
            <name name-style="western">
              <surname>Choi</surname>
              <given-names>Seong-Ho</given-names>
            </name>
          </person-group>
          <article-title>Detection and diagnosis of dental caries using a deep learning-based convolutional neural network algorithm</article-title>
          <source>J Dent</source>
          <year>2018</year>
          <month>10</month>
          <volume>77</volume>
          <fpage>106</fpage>
          <lpage>111</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jdent.2018.07.015</pub-id>
          <pub-id pub-id-type="medline">30056118</pub-id>
          <pub-id pub-id-type="pii">S0300-5712(18)30225-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Møystad</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Svanaes</surname>
              <given-names>D B</given-names>
            </name>
            <name name-style="western">
              <surname>van der Stelt</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Gröndahl</surname>
              <given-names>H-g</given-names>
            </name>
            <name name-style="western">
              <surname>Wenzel</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>van Ginkel</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Kullendorff</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Hintze</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Larheim</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Comparison of standard and task-specific enhancement of Digora storage phosphor images for approximal caries diagnosis</article-title>
          <source>Dentomaxillofac Radiol</source>
          <year>2003</year>
          <month>11</month>
          <volume>32</volume>
          <issue>6</issue>
          <fpage>390</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1259/dmfr/76382099</pub-id>
          <pub-id pub-id-type="medline">15070842</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Veena Divya</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Jatti</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Joshi</surname>
              <given-names>R</given-names>
            </name>
            <collab>et al</collab>
          </person-group>
          <article-title>Characterization of dental pathologies using digital panoramic X-ray images based on texture analysis</article-title>
          <year>2017</year>
          <month>07</month>
          <day>11</day>
          <conf-name>Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)</conf-name>
          <conf-date>2017</conf-date>
          <conf-loc>Jeju Island</conf-loc>
          <fpage>592</fpage>
          <lpage>595</lpage>
          <pub-id pub-id-type="doi">10.1109/embc.2017.8036894</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Sehgal</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Automated caries detection based on radon transformation and DCT</article-title>
          <year>2017</year>
          <conf-name>International Conference on Computing, Communication and Networking Technologies (ICCCN)</conf-name>
          <conf-date>July 31</conf-date>
          <conf-loc>Vancouver, Canada</conf-loc>
          <fpage>1</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1109/icccnt.2017.8204030</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kale</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kakodkar</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Shetiya</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Assessment of mother's ability in caries diagnosis, utilizing the smartphone photographic method</article-title>
          <source>J Indian Soc Pedod Prev Dent</source>
          <year>2019</year>
          <volume>37</volume>
          <issue>4</issue>
          <fpage>360</fpage>
          <pub-id pub-id-type="doi">10.4103/jisppd.jisppd_349_18</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Geetha</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Aprameya</surname>
              <given-names>KS</given-names>
            </name>
            <name name-style="western">
              <surname>Hinduja</surname>
              <given-names>DM</given-names>
            </name>
          </person-group>
          <article-title>Dental caries diagnosis in digital radiographs using back-propagation neural network</article-title>
          <source>Health Inf Sci Syst</source>
          <year>2020</year>
          <month>12</month>
          <volume>8</volume>
          <issue>1</issue>
          <fpage>8</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/31949895"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s13755-019-0096-y</pub-id>
          <pub-id pub-id-type="medline">31949895</pub-id>
          <pub-id pub-id-type="pii">96</pub-id>
          <pub-id pub-id-type="pmcid">PMC6942116</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bayraktar</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ayan</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Diagnosis of interproximal caries lesions with deep convolutional neural network in digital bitewing radiographs</article-title>
          <source>Clin Oral Investig</source>
          <year>2022</year>
          <month>01</month>
          <volume>26</volume>
          <issue>1</issue>
          <fpage>623</fpage>
          <lpage>632</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/34173051"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s00784-021-04040-1</pub-id>
          <pub-id pub-id-type="medline">34173051</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00784-021-04040-1</pub-id>
          <pub-id pub-id-type="pmcid">PMC8232993</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>JH</given-names>
            </name>
            <collab>et al</collab>
          </person-group>
          <article-title>Evaluation of national health insurance coverage of periodontal scaling: A nationwide cohort study in Korea</article-title>
          <source>Journal of the Korean Dental Association</source>
          <year>2016</year>
          <access-date>2022-10-12</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://koreascience.kr/article/JAKO201664558247211.page">https://koreascience.kr/article/JAKO201664558247211.page</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Cho</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Comparative Analysis of Dental Caries Detection Technologies based on Computer-aided Diagnosis System</article-title>
          <source>KIEE</source>
          <year>2019</year>
          <month>02</month>
          <day>28</day>
          <volume>68</volume>
          <issue>2</issue>
          <fpage>350</fpage>
          <lpage>358</lpage>
          <pub-id pub-id-type="doi">10.5370/kiee.2019.68.2.350</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ezhov</surname>
              <given-names>Matvey</given-names>
            </name>
            <name name-style="western">
              <surname>Gusarev</surname>
              <given-names>Maxim</given-names>
            </name>
            <name name-style="western">
              <surname>Golitsyna</surname>
              <given-names>Maria</given-names>
            </name>
            <name name-style="western">
              <surname>Yates</surname>
              <given-names>Julian M</given-names>
            </name>
            <name name-style="western">
              <surname>Kushnerev</surname>
              <given-names>Evgeny</given-names>
            </name>
            <name name-style="western">
              <surname>Tamimi</surname>
              <given-names>Dania</given-names>
            </name>
            <name name-style="western">
              <surname>Aksoy</surname>
              <given-names>Secil</given-names>
            </name>
            <name name-style="western">
              <surname>Shumilov</surname>
              <given-names>Eugene</given-names>
            </name>
            <name name-style="western">
              <surname>Sanders</surname>
              <given-names>Alex</given-names>
            </name>
            <name name-style="western">
              <surname>Orhan</surname>
              <given-names>Kaan</given-names>
            </name>
          </person-group>
          <article-title>Clinically applicable artificial intelligence system for dental diagnosis with CBCT</article-title>
          <source>Sci Rep</source>
          <year>2021</year>
          <month>07</month>
          <day>22</day>
          <volume>11</volume>
          <issue>1</issue>
          <fpage>15006</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-021-94093-9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-021-94093-9</pub-id>
          <pub-id pub-id-type="medline">34294759</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-021-94093-9</pub-id>
          <pub-id pub-id-type="pmcid">PMC8298426</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>White</surname>
              <given-names>SC</given-names>
            </name>
            <name name-style="western">
              <surname>Pharaoh</surname>
              <given-names>MJ</given-names>
            </name>
          </person-group>
          <source>Oral Radiology-E-Book: Principles and Interpretation</source>
          <year>1982</year>
          <publisher-loc>St Louis, Missouri</publisher-loc>
          <publisher-name>Elsevier Health Sciences</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Girshick</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Fast R-CNN</article-title>
          <year>2015</year>
          <conf-name>2015 IEEE International Conference on Computer Vision (ICCV)</conf-name>
          <conf-date>December 7-13</conf-date>
          <conf-loc>Santiago, Chile</conf-loc>
          <pub-id pub-id-type="doi">10.1109/ICCV.2015.169</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ray</surname>
              <given-names>PP</given-names>
            </name>
            <name name-style="western">
              <surname>Dash</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>De</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Edge computing for Internet of Things: A survey, e-healthcare case study and future direction</article-title>
          <source>Journal of Network and Computer Applications</source>
          <year>2019</year>
          <month>08</month>
          <volume>140</volume>
          <fpage>1</fpage>
          <lpage>22</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jnca.2019.05.005</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Szegedy</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ioffe</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Vanhoucke</surname>
              <given-names>V</given-names>
            </name>
            <collab>et al</collab>
          </person-group>
          <article-title>Inception-v4, inception-ResNet and the impact of residual connections on learning</article-title>
          <year>2017</year>
          <conf-name>AAAI'17: Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence</conf-name>
          <conf-date>February 4-9</conf-date>
          <conf-loc>San Francisco, California</conf-loc>
          <pub-id pub-id-type="doi">10.1609/aaai.v31i1.11231</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Poggio</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Mhaskar</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Rosasco</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Miranda</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Liao</surname>
              <given-names>Q</given-names>
            </name>
          </person-group>
          <article-title>Why and when can deep-but not shallow-networks avoid the curse of dimensionality: A review</article-title>
          <source>Int. J. Autom. Comput</source>
          <year>2017</year>
          <month>3</month>
          <day>14</day>
          <volume>14</volume>
          <issue>5</issue>
          <fpage>503</fpage>
          <lpage>519</lpage>
          <pub-id pub-id-type="doi">10.1007/s11633-017-1054-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Szegedy</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Rethinking the inception architecture for computer vision</article-title>
          <year>2016</year>
          <month>06</month>
          <day>27</day>
          <conf-name>Proceedings of the IEEE conference on computer vision and pattern recognition</conf-name>
          <conf-date>2016</conf-date>
          <conf-loc>Las Vegas, NV, USA</conf-loc>
          <pub-id pub-id-type="doi">10.1109/cvpr.2016.308</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Guan</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Wan</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Ping</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Xiang</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Deep convolutional neural network Inception-v3 model for differential diagnosing of lymph node in cytological images: a pilot study</article-title>
          <source>Ann Transl Med</source>
          <year>2019</year>
          <month>07</month>
          <volume>7</volume>
          <issue>14</issue>
          <fpage>307</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.21037/atm.2019.06.29"/>
          </comment>
          <pub-id pub-id-type="doi">10.21037/atm.2019.06.29</pub-id>
          <pub-id pub-id-type="medline">31475177</pub-id>
          <pub-id pub-id-type="pii">atm-07-14-307</pub-id>
          <pub-id pub-id-type="pmcid">PMC6694266</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Flanagan</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <source>Java-Script: The Definitive Guide</source>
          <access-date>2022-10-12</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://pepa.holla.cz/wp-content/uploads/2016/08/JavaScript-The-Definitive-Guide-6th-Edition.pdf">https://pepa.holla.cz/wp-content/uploads/2016/08/JavaScript-The-Definitive-Guide-6th-Edition.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Prendinger</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Igarashi</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Communicating emotions in online chat using physiological sensors and animated text</article-title>
          <source>Human Factors in Computing Systems</source>
          <year>2004</year>
          <fpage>1171</fpage>
          <pub-id pub-id-type="doi">10.1145/985921.986016</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>Performance evaluation of transparent persistence layer in Java applications</article-title>
          <year>2010</year>
          <month>10</month>
          <day>10</day>
          <conf-name>International Conference on Cyber-Enabled Distributed Computing and Knowledge Discovery</conf-name>
          <conf-date>October 10</conf-date>
          <conf-loc>Huangshan, China</conf-loc>
          <fpage>21</fpage>
          <lpage>26</lpage>
          <pub-id pub-id-type="doi">10.1109/cyberc.2010.15</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Saimi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Symora</surname>
              <given-names>T</given-names>
            </name>
            <collab>et al</collab>
          </person-group>
          <article-title>Presentation layer framework of web application systems with server-side Java technology</article-title>
          <year>2010</year>
          <month>10</month>
          <day>06</day>
          <conf-name>Proceedings of the 24th Annual International Computer Software and Applications Conference</conf-name>
          <conf-date>October 6</conf-date>
          <conf-loc>Taipei, Taiwan</conf-loc>
          <pub-id pub-id-type="doi">10.1109/cmpsac.2000.884769</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Boicea</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Radulescu</surname>
              <given-names>F</given-names>
            </name>
            <collab>et al</collab>
          </person-group>
          <article-title>MongoDB vs Oracle: database comparison</article-title>
          <year>2012</year>
          <conf-name>Third International Conference on Emerging Intelligent Data and Web Technologies</conf-name>
          <conf-date>September 21</conf-date>
          <conf-loc>Bucharest, Romania</conf-loc>
          <fpage>330</fpage>
          <lpage>335</lpage>
          <pub-id pub-id-type="doi">10.1109/eidwt.2012.32</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="web">
          <article-title>Resnet in Resnet: generalizing residual architectures</article-title>
          <source>Deep AI</source>
          <access-date>2022-10-12</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://deepai.org/publication/resnet-in-resnet-generalizing-residual-architectures">https://deepai.org/publication/resnet-in-resnet-generalizing-residual-architectures</ext-link>
          </comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
