<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMI</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Med Inform</journal-id>
      <journal-title>JMIR Medical Informatics</journal-title>
      <issn pub-type="epub">2291-9694</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v10i8e37658</article-id>
      <article-id pub-id-type="pmid">36001363</article-id>
      <article-id pub-id-type="doi">10.2196/37658</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Predicting Abnormalities in Laboratory Values of Patients in the Intensive Care Unit Using Different Deep Learning Models: Comparative Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Lovis</surname>
            <given-names>Christian</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Turbe</surname>
            <given-names>Hugues</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Sükei</surname>
            <given-names>Emese</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes" equal-contrib="yes">
          <name name-style="western">
            <surname>Ayad</surname>
            <given-names>Ahmad</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Chair of Information Theory and Data Analytics</institution>
            <institution>Rheinisch-Westfälische Technische Hochschule Aachen</institution>
            <addr-line>Kopernikusstraße 16</addr-line>
            <addr-line>Aachen, 52074</addr-line>
            <country>Germany</country>
            <phone>49 (241) 80 20750</phone>
            <email>ahmad.ayad@inda.rwth-aachen.de</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-9081-1274</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Hallawa</surname>
            <given-names>Ahmed</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-3932-4873</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Peine</surname>
            <given-names>Arne</given-names>
          </name>
          <degrees>Dr med, MHBA</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-4163-2402</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Martin</surname>
            <given-names>Lukas</given-names>
          </name>
          <degrees>Priv-Doz, Dr med, MHBA</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-8650-5090</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Fazlic</surname>
            <given-names>Lejla Begic</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-9869-0219</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Dartmann</surname>
            <given-names>Guido</given-names>
          </name>
          <degrees>Prof Dr-Ing</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-6786-6664</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author">
          <name name-style="western">
            <surname>Marx</surname>
            <given-names>Gernot</given-names>
          </name>
          <degrees>Prof Dr med, FRCA</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-0866-4234</ext-link>
        </contrib>
        <contrib id="contrib8" contrib-type="author">
          <name name-style="western">
            <surname>Schmeink</surname>
            <given-names>Anke</given-names>
          </name>
          <degrees>Prof Dr-Ing</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-9929-2925</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Chair of Information Theory and Data Analytics</institution>
        <institution>Rheinisch-Westfälische Technische Hochschule Aachen</institution>
        <addr-line>Aachen</addr-line>
        <country>Germany</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Intensive Care and Intermediate Care</institution>
        <institution>University Hospital Rheinisch-Westfälische Technische Hochschule Aachen</institution>
        <addr-line>Aachen</addr-line>
        <country>Germany</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Fachbereich Umweltplanung/Umwelttechnik - Fachrichtung Informatik</institution>
        <institution>Trier University of Applied Sciences</institution>
        <addr-line>Trier</addr-line>
        <country>Germany</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Ahmad Ayad <email>ahmad.ayad@inda.rwth-aachen.de</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>8</month>
        <year>2022</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>24</day>
        <month>8</month>
        <year>2022</year>
      </pub-date>
      <volume>10</volume>
      <issue>8</issue>
      <elocation-id>e37658</elocation-id>
      <history>
        <date date-type="received">
          <day>1</day>
          <month>3</month>
          <year>2022</year>
        </date>
        <date date-type="rev-request">
          <day>11</day>
          <month>4</month>
          <year>2022</year>
        </date>
        <date date-type="rev-recd">
          <day>5</day>
          <month>6</month>
          <year>2022</year>
        </date>
        <date date-type="accepted">
          <day>12</day>
          <month>6</month>
          <year>2022</year>
        </date>
      </history>
      <copyright-statement>©Ahmad Ayad, Ahmed Hallawa, Arne Peine, Lukas Martin, Lejla Begic Fazlic, Guido Dartmann, Gernot Marx, Anke Schmeink. Originally published in JMIR Medical Informatics (https://medinform.jmir.org), 24.08.2022.</copyright-statement>
      <copyright-year>2022</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Informatics, is properly cited. The complete bibliographic information, a link to the original publication on https://medinform.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://medinform.jmir.org/2022/8/e37658" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>In recent years, the volume of medical knowledge and health data has increased rapidly. For example, the increased availability of electronic health records (EHRs) provides accurate, up-to-date, and complete information about patients at the point of care and enables medical staff to have quick access to patient records for more coordinated and efficient care. With this increase in knowledge, the complexity of accurate, evidence-based medicine tends to grow all the time. Health care workers must deal with an increasing amount of data and documentation. Meanwhile, relevant patient data are frequently overshadowed by a layer of less relevant data, causing medical staff to often miss important values or abnormal trends and their importance to the progression of the patient’s case.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>The goal of this work is to analyze the current laboratory results for patients in the intensive care unit (ICU) and classify which of these lab values could be abnormal the next time the test is done. Detecting near-future abnormalities can be useful to support clinicians in their decision-making process in the ICU by drawing their attention to the important values and focus on future lab testing, saving them both time and money. Additionally, it will give doctors more time to spend with patients, rather than skimming through a long list of lab values.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We used Structured Query Language to extract 25 lab values for mechanically ventilated patients in the ICU from the MIMIC-III and eICU data sets. Additionally, we applied time-windowed sampling and holding, and a support vector machine to fill in the missing values in the sparse time series, as well as the Tukey range to detect and delete anomalies. Then, we used the data to train 4 deep learning models for time series classification, as well as a gradient boosting–based algorithm and compared their performance on both data sets.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The models tested in this work (deep neural networks and gradient boosting), combined with the preprocessing pipeline, achieved an accuracy of at least 80% on the multilabel classification task. Moreover, the model based on the multiple convolutional neural network outperformed the other algorithms on both data sets, with the accuracy exceeding 89%.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>In this work, we show that using machine learning and deep neural networks to predict near-future abnormalities in lab values can achieve satisfactory results. Our system was trained, validated, and tested on 2 well-known data sets to ensure that our system bridged the reality gap as much as possible. Finally, the model can be used in combination with our preprocessing pipeline on real-life EHRs to improve patients’ diagnosis and treatment.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>anomaly detection</kwd>
        <kwd>DNN</kwd>
        <kwd>time series classification</kwd>
        <kwd>lab values</kwd>
        <kwd>ICU</kwd>
        <kwd>CNN</kwd>
        <kwd>medical informatics</kwd>
        <kwd>EHR</kwd>
        <kwd>machine learning</kwd>
        <kwd>lightGBM</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>Machine learning and data analysis methods are used for diverse applications, such as anomaly detection [<xref ref-type="bibr" rid="ref1">1</xref>], text classification [<xref ref-type="bibr" rid="ref2">2</xref>], image segmentation [<xref ref-type="bibr" rid="ref3">3</xref>], and time series forecasting [<xref ref-type="bibr" rid="ref4">4</xref>]. One of the fields in which machine learning has become extremely popular recently is medicine. In medicine, there are now many other applications due to the improved availability of data. In particular, medical images [<xref ref-type="bibr" rid="ref5">5</xref>] and electronic health records (EHRs) [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref7">7</xref>] represent prominent examples here. Much research has been done on medical images to detect diseases, such as pneumonia [<xref ref-type="bibr" rid="ref8">8</xref>], which was driven by the advancements in computer vision. In addition, EHRs enabled the use of machine learning models to perform many tasks, such as predicting hospital length of stay [<xref ref-type="bibr" rid="ref9">9</xref>] and mortality in septic patients [<xref ref-type="bibr" rid="ref10">10</xref>]. In these studies, the authors used EHRs to train their machine learning models. However, EHRs have so much more data that with the right tools, they can support many valuable applications.</p>
        <p>In this study, we consider the treatment of critically ill patients in the intensive care unit (ICU). Throughout the treatment of these patients, laboratory data are regularly gathered. Due to the substantial number of values to be monitored in the ICU, which sometimes can be more than 100 lab tests [<xref ref-type="bibr" rid="ref11">11</xref>], important anomalies or trends may not be noticed. This can lead to suboptimal treatment strategies and complications in the patient’s case. For example, early changes in lab values for patients with COVID-19 are important predictors of mortality [<xref ref-type="bibr" rid="ref12">12</xref>]. The correct analysis of laboratory anomalies can direct treatment strategies, particularly in the early detection of potentially life-threatening cases. This should aid in resource allocation and save lives by allowing for timely intervention. Furthermore, health care workers spend 30%-50% of their time in front of computers and must deal with a mass of patient data [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. Any savings in that time can free them to spend more time with patients.</p>
      </sec>
      <sec>
        <title>Prior Work</title>
        <p>Because of the recent availability of big data in the medical field, especially EHRs, there has been a growing interest in applying machine learning tools for medical applications. Working with medical data from EHRs can be quite challenging due to the inconsistent sampling of lab measurements, high frequency of missing values, and presence of noisy data. Additionally, there is no standardized way to process medical data before applying machine learning algorithms on them. Nevertheless, many authors have managed to process the data and apply machine learning algorithms for medical sequence modeling. Authors [<xref ref-type="bibr" rid="ref15">15</xref>] have developed a masked, self-attention mechanism that uses positional encoding and dense interpolation strategies for incorporating temporal order. The authors trained and tested their model on the MIMIC-III data set and achieved better performance on it compared to recurrent neural networks (RNNs). The benchmarking tasks include predicting mortality (classification), length of stay (regression), phenotyping (multilabel classification), and decompensation (time series classification) [<xref ref-type="bibr" rid="ref16">16</xref>]. Although the benchmarking tasks include a classification task, none of these tasks include lab values or the modeling of irregularly sampled sequences with large amounts of sparse data. The benchmark is created to compare different machine learning models on a specific type of medical data extracted from the MIMIC-III data set and covers only 4 tasks. However, MIMIC-III has much more data that can allow for performing many more tasks like the one in this study.</p>
        <p>There has also been some work that compares different approaches and machine learning algorithms for learning from irregularly sampled time series, which is mostly the case in medicine. For example, authors [<xref ref-type="bibr" rid="ref17">17</xref>] compare modeling primitives that allow learning from the different forms of irregular time series, such as discretization, interpolation, recurrence, attention, and structural invariance. The authors discuss the pros and cons of each of these modeling primitives and the tasks for which they are suited. Another study [<xref ref-type="bibr" rid="ref18">18</xref>] used a recurrence-based approach using specific versions of RNNs called gated recurrent units (GRUs) and discussed the advantages of using it instead of the other approaches. Additionally, authors [<xref ref-type="bibr" rid="ref19">19</xref>] have proposed a system for early detection of sepsis using an interpolation-based method for data imputation followed by using temporal convolutional networks (TCNs) and dynamic time warping. The authors used a multitask Gaussian process for multichannel data imputation and later used a TCN model to predict the probability of a sepsis diagnosis in the future. The authors proved that their proposed algorithm outperforms the state-of-the-art algorithm for sepsis detection. In contrast, we use a discretization-based approach followed by data imputation to convert the irregularly sampled time series to a regularly sampled one, as it provides an easy way to understand, debug, and implement a framework to deal with sensitive lab values that can be generalized effectively to other EHRs.</p>
      </sec>
      <sec>
        <title>Goal of This Study</title>
        <p>This work’s objective is to analyze laboratory results (lab values) of patients in the ICU and classify which of these lab values are predicted to be out of the normal range soon (the next time these tests are done) and which are predicted to be normal. This allows health workers to focus on these laboratory values, their significance, their relation to the patient's current case, and their impact on the patient's future condition. This can potentially lead to reducing the length of the ICU stay and mortality [<xref ref-type="bibr" rid="ref20">20</xref>]. Moreover, health care workers can focus future testing on these lab values and not waste time and resources on unnecessary tests that constitute approximately 50% of the tests ordered in the ICU [<xref ref-type="bibr" rid="ref21">21</xref>]. Finally, it will allow the medical staff to reduce the time they need to check all the lab values and focus on the relevant ones, giving them more time to spend with patients [<xref ref-type="bibr" rid="ref14">14</xref>].</p>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Problem Definition</title>
        <p>The task at hand is to predict which lab values will be normal and which will be abnormal in the future, for a given period of ICU stay. The input data contain the patients' demographics and numerical lab values from the moment they were admitted till the end of their stay. The output is a binary vector, where each number represents the likelihood of a specific lab value to be abnormal (1) or normal (0) in the next 4 hours. Therefore, our problem is a “many to one” or a multilabel classification problem. Moreover, we have chosen the 4-hour time window because the majority of lab values found in MIMIC-III and eICU are recorded every 4 hours. Therefore, using this time step will introduce the least amount of data artifacts, especially considering that the changes in lab values are not noticeable for smaller time frames (like 1 hour). The same time window for lab values has been used by other authors [<xref ref-type="bibr" rid="ref22">22</xref>]. Finally, the general diagram of the system is shown in <xref rid="figure1" ref-type="fig">Figure 1</xref>.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Overall abnormality detection system in practice. DNN: deep neural network.</p>
          </caption>
          <graphic xlink:href="medinform_v10i8e37658_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Data and Cohort Definition</title>
        <p>The data used to train, validate, and test the different prediction models are derived from the MIMIC-III database. It is a database that contains data from 31,532 unique ICU stays of patients who stayed within the ICUs at the Beth Israel Deaconess Medical Center [<xref ref-type="bibr" rid="ref6">6</xref>] between 2001 and 2012. We also used data derived from the eICU Collaborative Research Database [<xref ref-type="bibr" rid="ref7">7</xref>]. It is a multicenter database for critical care research created by the Philips eICU program. It contains data on 200,859 ICU stays from 335 ICUs in the United States of America. In both databases, a unique ICU stay ID is associated with every unique ICU admission.</p>
        <p>Our cohort focuses on mechanically ventilated patients in the ICU. This cohort is truly relevant these days because of the COVID-19 virus that caused a sharp increase in the number of patients in the ICU receiving mechanical ventilation. For these patients, it is vital to know which set of lab values have abnormal trends and focus on them, as it has a direct relation to how the case will develop [<xref ref-type="bibr" rid="ref12">12</xref>]. The same cohort was used in a previous work focused on dynamically optimizing mechanical ventilation in critical care using reinforcement learning [<xref ref-type="bibr" rid="ref22">22</xref>]. Using this cohort, we extracted 25,086 eICU and 11,943 MIMIC-III ICU stays with mechanical ventilation events. The duration of the ICU patients' stays ranges from 12 h to 72 h in 4-hour time steps. Patient demographics and clinical characteristics are shown in <xref ref-type="table" rid="table1">Table 1</xref>.</p>
        <p>The input data consist of 3 demographic features (age, sex, weight) and 25 lab values (white blood cell count, PaCO<sub>2</sub>, hemoglobin, etc). The lab values chosen are the most relevant to the mechanically ventilated patients, as shown by the medical team members from the university hospital of Rheinisch Westfälische Technische Hochschule (RWTH) Aachen in their previous work [<xref ref-type="bibr" rid="ref22">22</xref>]. In <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>, the chosen features from the MIMIC-III and eICU data sets are listed along with their means and SDs.</p>
        <p>The output is a binary vector of length 25. To convert numerical lab values to binary values, we used the reference ranges followed by the American College of Physicians [<xref ref-type="bibr" rid="ref23">23</xref>]. Finally, the queries of Structured Query Language (SQL) used to extract the cohort data from both databases are included in the Git repository [<xref ref-type="bibr" rid="ref24">24</xref>].</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Clinical and demographic properties of the study population [<xref ref-type="bibr" rid="ref16">16</xref>].</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="460"/>
            <col width="240"/>
            <col width="300"/>
            <thead>
              <tr valign="top">
                <td>Property</td>
                <td>MIMIC-III data set</td>
                <td>eICU data set</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Number of ICUs<sup>a</sup></td>
                <td>5</td>
                <td>335</td>
              </tr>
              <tr valign="top">
                <td>Data acquisition timespan</td>
                <td>2001-2012</td>
                <td>2014-2015</td>
              </tr>
              <tr valign="top">
                <td>Number of included patients (N)</td>
                <td>11,443</td>
                <td>23,699</td>
              </tr>
              <tr valign="top">
                <td>Age (years), median (IQR)</td>
                <td>66.9 (56.3-77.5)</td>
                <td>65.0 (54-74)</td>
              </tr>
              <tr valign="top">
                <td>Body weight in kg, mean (SD)</td>
                <td>85.7 (18.1)</td>
                <td>83.5 (22.0)</td>
              </tr>
              <tr valign="top">
                <td>Sex, female, n (%)</td>
                <td>4329 (36.3%)</td>
                <td>10,546 (42%)</td>
              </tr>
              <tr valign="top">
                <td>Sex, male, n (%)</td>
                <td>7614 (63.7%)</td>
                <td>14,540 (58%)</td>
              </tr>
              <tr valign="top">
                <td>In-hospital mortality, %</td>
                <td>11.1</td>
                <td>13.2</td>
              </tr>
              <tr valign="top">
                <td>LOS<sup>b</sup> in ICU (days), median (IQR)</td>
                <td>3.1 (1.6-6.1)</td>
                <td>3.0 (1.71-5.9)</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>ICU: intensive care unit.</p>
            </fn>
            <fn id="table1fn2">
              <p><sup>b</sup>LOS: length of stay.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Preprocessing</title>
        <p>The patients’ raw data extracted from the MIMIC-III and eICU data sets were very sparse and had several missing values. Therefore, it was necessary to perform preprocessing to prepare the data for the machine learning pipeline. First, the time-windowed sample-and-hold method was used to handle missing values. In this method, the data sample is held (repeated) until the next available data sample or the maximum hold time is reached. For each feature, we conducted a frequency analysis to determine how often a new measurement is produced. The counts of consecutive measurement time differences are obtained and when their cumulative sum exceeds a threshold, the first value where this occurs is taken as the hold time. When the feature's hold time exceeds this maximum, the data point is considered corrupted [<xref ref-type="bibr" rid="ref25">25</xref>]. For the rest of the missing values, a k-nearest neighbor imputation with singular value decomposition and mean imputation were used [<xref ref-type="bibr" rid="ref26">26</xref>]. Any ICU stay that had more than 50% missing data was discarded (occurrence &#60;1% in the overall cohort) [<xref ref-type="bibr" rid="ref22">22</xref>]. Finally, the Tukey range test was used to detect and delete outliers. The preprocessing steps are explained in detail in the Git repository [<xref ref-type="bibr" rid="ref24">24</xref>].</p>
      </sec>
      <sec>
        <title>Prediction System Overview</title>
        <p>The overall system architecture used for predicting abnormalities in patients' lab values is shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>. After performing the preprocessing steps explained earlier, the output time series will be separated into two main types: demographics and lab values. Each ICU stay will be split into multiple shorter sequences using the moving window technique. <xref rid="figure3" ref-type="fig">Figure 3</xref> presents an example of an ICU stay of length L=11 (44 hours). Here,  <italic>X<sub>m</sub></italic> represents the patient's input data vector at time step  <italic>m</italic> ∈ <inline-graphic xlink:href="medinform_v10i8e37658_fig11.png" xlink:type="simple" mimetype="image"/><sup>+</sup>, and  <italic>Y<sub>m</sub></italic> represents the patient's output binary vector. For a window size <italic>W</italic> ∈ <inline-graphic xlink:href="medinform_v10i8e37658_fig11.png" xlink:type="simple" mimetype="image"/><sup>+</sup> of 8, we have 3 subsequences extracted from the stay. For example,  <italic>W<sub>1</sub></italic> includes the input vectors [<italic>X<sub>0</sub>:X<sub>7</sub></italic>] and the output binary vector <italic>Y<sub>8</sub></italic>. The process of the moving window is applied to ICU stays in the data sets (MIMIC-III, eICU). Then, the resulting subsequences are shuffled and used to train, validate, and test the different machine learning models that we have experimented with, as shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>. This means the windowed subsequences from the same ICU stay can be distributed across the training, validation, and testing sets. Moreover, we experimented with different window sizes between <italic>W=5</italic> and <italic>W=10</italic> and chose the one that gave us the best results for all the models, as explained in the Results section.</p>
        <p>We experimented with predicting the exact numerical lab values (regression problem) and then converting the predicted output to a binary vector after comparing the values with the normal ranges. The models were then trained to minimize the minimum squared error loss. The results were 10%-20% worse than those obtained when predicting the output binary vector directly and optimizing for the binary cross-entropy loss. Therefore, we selected this system model.</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Overall system model used in our study when trained on the MIMIC-III data set and tested on the eICU data set. ICU: intensive care unit; Sigmoid is an activation function; L: lab value; t: time step.</p>
          </caption>
          <graphic xlink:href="medinform_v10i8e37658_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Moving window technique to extract sequences from intensive care unit stays. X and Y represent the input and output data respectively; W represents the windows extracted from the input sequences.</p>
          </caption>
          <graphic xlink:href="medinform_v10i8e37658_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Prediction Models</title>
        <p>The goal of the prediction model in our scenario is to predict abnormalities in laboratory values for a given input sequence. The machine learning problem is a multilabel classification problem because multiple lab values are classified as normal or abnormal at the same time (multiclass) and more than 1 lab value can be abnormal at the same time (multilabel). We experimented with four current deep learning (DL) approaches: long short-term memory (LSTM), self-attention with time encoding (transformer architecture), convolutional neural network (CNN), and TCN. In the following subsections, each model architecture is discussed briefly. The models are explained in more detail in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref> [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref27">27</xref>-<xref ref-type="bibr" rid="ref39">39</xref>].</p>
        <sec>
          <title>LSTM models</title>
          <p>LSTM is a type of RNN that has the ability to learn from long sequences of data. A typical LSTM layer in a DL model consists of multiple LSTM cells. Another similar yet simpler cell structure is called GRU [<xref ref-type="bibr" rid="ref4">4</xref>]. We experimented with both cell types in our model and chose LSTM because it performed better. The architecture used in our experiment is shown in <xref rid="figure4" ref-type="fig">Figure 4</xref>. All the lab values will be input to the LSTM block to learn from the sequential data. Each LSTM block includes an LSTM layer, which has “tanh” as the built-in activation function. Then comes a batch normalization layer after the sequential data pass through the layers, and these data will be concatenated with the demographic features. The concatenated data will then go through a stack of fully connected layers ending with a last dense layer that has a sigmoid activation function. During forward propagation, the output probabilities will be compared to a threshold to produce the binary labels that are used to calculate the loss and other evaluation metrics.</p>
          <fig id="figure4" position="float">
            <label>Figure 4</label>
            <caption>
              <p>LSTM architecture used in our experiments. LSTM: long short-term memory; ReLU: rectified linear unit; Tanh, ReLU and Sigmoid are activation functions.</p>
            </caption>
            <graphic xlink:href="medinform_v10i8e37658_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>CNN models</title>
          <p>CNNs learn to optimize their kernels to extract information from input data in a successive manner. Additionally, they work well on time series forecasting and classification problems [<xref ref-type="bibr" rid="ref27">27</xref>], often outperforming LSTMs in terms of the total training time in a more computationally efficient manner [<xref ref-type="bibr" rid="ref28">28</xref>]. In our case, we used a 1D multiple CNN (M-CNN), where the kernels (filters) move along the time axis performing convolution operations on all features. The kernel size defines how many time steps 1 kernel covers at any point in time.</p>
          <p>Aside from the normal CNN that takes 1 input stream, we developed an architecture that takes 2 streams of the input sequences in parallel. Each stream will be processed with different filters. This ensures that we capture short-term dependencies in the sequences as well as long-term ones. The network architecture is shown in <xref rid="figure5" ref-type="fig">Figure 5</xref>.</p>
          <fig id="figure5" position="float">
            <label>Figure 5</label>
            <caption>
              <p>Multiple convolutional neural network model architecture used in our experiments. Conv1D: 1D convolutional layer; LeakyReLU: leaky rectified linear unit; ReLU: rectified linear unit; Sigmoid, LeakyReLU, and ReLU are activation functions.</p>
            </caption>
            <graphic xlink:href="medinform_v10i8e37658_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>Transformer models</title>
          <p>Transformers are a recent neural network architecture derived from the attention mechanism first proposed in an earlier study [<xref ref-type="bibr" rid="ref29">29</xref>]. The mechanism was designed initially for translation tasks, which were earlier accomplished using RNNs.</p>
          <p>Transformers typically use a collection of superimposed sinusoidal functions to represent the position of words in natural language processing tasks. However, in time series tasks, we need to attach the meaning of time to our input. Authors [<xref ref-type="bibr" rid="ref30">30</xref>] have introduced a method where each input feature is represented as a linear component and a periodic component. The result at the end will be a learned vector representation of time steps that will be concatenated with the input data before the attention layers. The model architecture we developed is shown in <xref rid="figure6" ref-type="fig">Figure 6</xref>.</p>
          <fig id="figure6" position="float">
            <label>Figure 6</label>
            <caption>
              <p>Transformer architecture used in our experiments. Conv1D: 1D convolutional layer; Time2Vec: time to vector transformation; ReLU: rectified linear unit; ReLU and Sigmoid are activation functions.</p>
            </caption>
            <graphic xlink:href="medinform_v10i8e37658_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>TCN models</title>
          <p>TCNs were first introduced for video-based action segmentation [<xref ref-type="bibr" rid="ref31">31</xref>]. Not long after that, they were used for sequence modeling tasks like the detection of sepsis [<xref ref-type="bibr" rid="ref19">19</xref>]. A TCN differs from a conventional CNN in 2 ways; first, a TCN can take a sequence of any length and output a sequence of the same length using 0 padding; second, a TCN performs causal convolution. In general, TCNs are advantageous because they can be trained in parallel with less memory unlike RNNs. Additionally, they support variable length inputs and can easily replace any existing RNN. <xref rid="figure7" ref-type="fig">Figure 7</xref> shows the TCN architecture that we designed and used in our experiments.</p>
          <fig id="figure7" position="float">
            <label>Figure 7</label>
            <caption>
              <p>TCN architecture used in our experiments. LeakyReLU: leaky rectified linear unit; ReLU: rectified linear unit; TCN: temporal convolutional network; LeakyReLU, ReLU, and Sigmoid are activation functions.</p>
            </caption>
            <graphic xlink:href="medinform_v10i8e37658_fig7.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
      </sec>
      <sec>
        <title>Evaluation Metrics</title>
        <p>In our work, we predicted the output binary vector of the future time step rather than the actual numerical lab values. We tried training the models as regression models predicting the actual numerical values and minimizing the mean squared error. Then, we converted the predicted numerical output to binary vectors using the recommended ranges. However, we received better results when we treated the models as multilabel, multiclass classifiers predicting the binary vectors directly. Therefore, the evaluation metrics we used are binary accuracy, precision, recall, and F1 score.</p>
      </sec>
      <sec>
        <title>Evaluation Setup</title>
        <p>As we were predicting multiple lab values at the same time and all the classes were of equal importance, we used micro-averaging to calculate the accuracy, precision, recall, and F1 globally. These evaluation metrics were used to evaluate the models' training, validation, and testing. Additionally, to compare the models, the following points were followed: First, the models' architectures and hyperparameters were optimized using the Keras Tuner library [<xref ref-type="bibr" rid="ref40">40</xref>] to ensure that the models performed at their best. Second, the models were trained to optimize the binary cross-entropy loss [<xref ref-type="bibr" rid="ref41">41</xref>]. Third, early stopping was used to stop the model's training once the validation loss did not change by 0.01 for 10 consecutive epochs. This reduces the chances of model overfitting. Fourth, we set the seed for all the random processes during model training to ensure replicability of our results. Finally, we used the same threshold (TH=0.5) and same window size (sequence length=6) for all the models to ensure a fair comparison. We used 0 padding for sequences shorter than 6 time steps (ICU stay length&#60;24 hours). Moreover, we implemented a gradient boosting–based method (LightGBM) for comparison with DL-based methods. LightGBM is one of the best performing non-DL–based algorithms that is shown to perform well on time series classification tasks [<xref ref-type="bibr" rid="ref32">32</xref>].</p>
        <p>We experimented with 2 approaches for training the models. In the first approach, we trained the models and validated them on the MIMIC-III data set. Then, we tested them on the MIMIC-III and eICU data sets, as shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>. In the second approach, we trained and validated them on the eICU data set instead. Then, we tested them on the eICU and MIMIC-III data sets. <xref ref-type="table" rid="table2">Table 2</xref> shows counts of the training, validation, and testing samples used in both methods from each data set (window size=6). The same cohort of patients was used in both cases, but eICU has much more patient data that led to a much bigger set than MIMIC-III. Finally, the model architectures and hyperparameters can be found on our Git repository [<xref ref-type="bibr" rid="ref24">24</xref>] and in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>.</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Sample counts for training, validation, and testing in both training methods.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="100"/>
            <col width="210"/>
            <col width="220"/>
            <col width="220"/>
            <col width="250"/>
            <thead>
              <tr valign="top">
                <td>Method</td>
                <td>Number of training samples</td>
                <td>Number of validation samples</td>
                <td>Number of first testing samples</td>
                <td>Number of second testing samples</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>#1</td>
                <td>73,190 (MIMIC-III)</td>
                <td>12,915 (MIMIC-III)</td>
                <td>21,526 (MIMIC-III)</td>
                <td>196,208 (eICU)</td>
              </tr>
              <tr valign="top">
                <td>#2</td>
                <td>166,776 (eICU)</td>
                <td>29,431 (eICU)</td>
                <td>49,052 (eICU)</td>
                <td>86,106 (MIMIC-III)</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Ethics Approval</title>
        <p>Approval for data collection, processing, and release for the MIMIC-III database has been granted by the Institutional Review Boards of the Beth Israel Deaconess Medical Center (Boston, United States) and Massachusetts Institute of Technology (Cambridge, United States). Approval for data collection, processing, and release for the eICU database has been granted by the eICU research committee and exempt from Institutional Review Board approval. All data were processed using the computational infrastructure at the RWTH Aachen University and the University Hospital at RWTH Aachen in accordance with European Union data protection laws.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <p>In <xref rid="figure8" ref-type="fig">Figures 8</xref>, 9, and 10, we report the validation loss, F1 score, and accuracy of the different models during training, respectively. The models’ names ending with “mimic” indicate that they were trained on the MIMIC-III data set and those ending in “eicu” refer to the models trained on the eICU data set. Moreover, because of the early stopping used during training, some models stopped training before others. Thus, their metrics are constant after the stopping point.</p>
      <p>In <xref ref-type="table" rid="table3">Tables 3</xref> and <xref ref-type="table" rid="table4">4</xref>, we report the testing accuracy, recall, precision, and F1 scores of the different models. All the results were averaged over all the lab values and the testing samples.</p>
      <p>As we expect our system to run continuously on huge amounts of data in hospitals, we want the performance of the chosen model to be good enough to meet such demands. Therefore, we measured the models' inference times. Experiments were run on a computer with an Intel(R) Core i9-9900K processor (Intel Corporation) running at 3.60 GHz using a 32-GB DDR4 RAM and Nvidia GTX 1080ti graphics processing unit (Nvidia Corporation), running Ubuntu (version 20.04, Canonical Ltd), Python (version 3.8, Python Software Foundation), and TensorFlow (version 2.6, Google Brain). <xref ref-type="table" rid="table5">Table 5</xref> reports the inference time for each model on a whole batch (batch size=128 samples).</p>
      <fig id="figure8" position="float">
        <label>Figure 8</label>
        <caption>
          <p>Validation loss of the different models. LSTM: long short-term memory; M-CNN: multiple convolutional neural network; TCN: temporal convolutional network; Val.: validation; ICU: intensive care unit.</p>
        </caption>
        <graphic xlink:href="medinform_v10i8e37658_fig8.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <fig id="figure9" position="float">
        <label>Figure 9</label>
        <caption>
          <p>Validation F1 score of the different models. LSTM: long short-term memory; M-CNN: multiple convolutional neural network; TCN: temporal convolutional network; Val.: validation; ICU: intensive care unit.</p>
        </caption>
        <graphic xlink:href="medinform_v10i8e37658_fig9.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <fig id="figure10" position="float">
        <label>Figure 10</label>
        <caption>
          <p>Validation accuracy of the different models. LSTM: long short-term memory; M-CNN: multiple convolutional neural network; TCN: temporal convolutional network; Val.: validation; ICU: intensive care unit.</p>
        </caption>
        <graphic xlink:href="medinform_v10i8e37658_fig10.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <table-wrap position="float" id="table3">
        <label>Table 3</label>
        <caption>
          <p>Testing results for the different models over all lab values (micro-average) on the MIMIC-III data set<sup>a</sup>.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="30"/>
          <col width="210"/>
          <col width="190"/>
          <col width="190"/>
          <col width="190"/>
          <col width="190"/>
          <thead>
            <tr valign="top">
              <td colspan="2">Training data set and model</td>
              <td>Accuracy</td>
              <td>Precision</td>
              <td>Recall</td>
              <td>F1 score</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td colspan="6">
                <bold>MIMIC-III</bold>
              </td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>LSTM<sup>b</sup></td>
              <td>0.85</td>
              <td>0.83</td>
              <td>0.87</td>
              <td>0.85</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>CNN<sup>c</sup></td>
              <td>0.86</td>
              <td>0.84</td>
              <td>0.85</td>
              <td>0.84</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>M-CNN<sup>d</sup></td>
              <td>0.88</td>
              <td>0.87</td>
              <td>0.89</td>
              <td>0.88</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Transformer</td>
              <td>0.86</td>
              <td>0.88</td>
              <td>0.81</td>
              <td>0.84</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>TCN<sup>e</sup></td>
              <td>0.86</td>
              <td>0.87</td>
              <td>0.85</td>
              <td>0.86</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>LightGBM<sup>f</sup></td>
              <td>0.83</td>
              <td>0.82</td>
              <td>0.76</td>
              <td>0.78</td>
            </tr>
            <tr valign="top">
              <td colspan="6">
                <bold>eICU</bold>
              </td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>LSTM</td>
              <td>0.8</td>
              <td>0.79</td>
              <td>0.81</td>
              <td>0.8</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>CNN</td>
              <td>0.85</td>
              <td>0.86</td>
              <td>0.83</td>
              <td>0.84</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>M-CNN</td>
              <td>0.87</td>
              <td>0.88</td>
              <td>0.86</td>
              <td>0.87</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Transformer</td>
              <td>0.86</td>
              <td>0.86</td>
              <td>0.84</td>
              <td>0.85</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>TCN</td>
              <td>0.83</td>
              <td>0.82</td>
              <td>0.84</td>
              <td>0.83</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>LightGBM</td>
              <td>0.82</td>
              <td>0.77</td>
              <td>0.78</td>
              <td>0.77</td>
            </tr>
          </tbody>
        </table>
        <table-wrap-foot>
          <fn id="table3fn1">
            <p><sup>a</sup>The models listed under MIMIC-III were trained on the MIMIC-III data set and those under eICU were trained on the eICU data set.</p>
          </fn>
          <fn id="table3fn2">
            <p><sup>b</sup>LSTM: long short-term memory.</p>
          </fn>
          <fn id="table3fn3">
            <p><sup>c</sup>CNN: convolutional neural network.</p>
          </fn>
          <fn id="table3fn4">
            <p><sup>d</sup>M-CNN: multiple convolutional neural network.</p>
          </fn>
          <fn id="table3fn5">
            <p><sup>e</sup>TCN: temporal convolutional network.</p>
          </fn>
          <fn id="table3fn6">
            <p><sup>f</sup>LightGBM: gradient boosting–based method.</p>
          </fn>
        </table-wrap-foot>
      </table-wrap>
      <table-wrap position="float" id="table4">
        <label>Table 4</label>
        <caption>
          <p>Testing results for the different models over all lab values (micro-average) on the eICU data set<sup>a</sup>.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="30"/>
          <col width="210"/>
          <col width="190"/>
          <col width="190"/>
          <col width="190"/>
          <col width="190"/>
          <thead>
            <tr valign="top">
              <td colspan="2">Training data set and model</td>
              <td>Accuracy</td>
              <td>Precision</td>
              <td>Recall</td>
              <td>F1 score</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td colspan="6">
                <bold>MIMIC-III</bold>
              </td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>LSTM<sup>b</sup></td>
              <td>0.79</td>
              <td>0.81</td>
              <td>0.8</td>
              <td>0.8</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>CNN<sup>c</sup></td>
              <td>0.78</td>
              <td>0.8</td>
              <td>0.8</td>
              <td>0.8</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>M-CNN<sup>d</sup></td>
              <td>0.8</td>
              <td>0.8</td>
              <td>0.83</td>
              <td>0.81</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Transformer</td>
              <td>0.75</td>
              <td>0.82</td>
              <td>0.69</td>
              <td>0.75</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>TCN<sup>e</sup></td>
              <td>0.71</td>
              <td>0.74</td>
              <td>0.72</td>
              <td>0.73</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>LightGBM<sup>f</sup></td>
              <td>0.75</td>
              <td>0.78</td>
              <td>0.75</td>
              <td>0.76</td>
            </tr>
            <tr valign="top">
              <td colspan="6">
                <bold>eICU</bold>
              </td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>LSTM</td>
              <td>0.82</td>
              <td>0.85</td>
              <td>0.83</td>
              <td>0.84</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>CNN</td>
              <td>0.85</td>
              <td>0.86</td>
              <td>0.83</td>
              <td>0.84</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>M-CNN</td>
              <td>0.89</td>
              <td>0.9</td>
              <td>0.91</td>
              <td>0.9</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Transformer</td>
              <td>0.86</td>
              <td>0.87</td>
              <td>0.88</td>
              <td>0.87</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>TCN</td>
              <td>0.89</td>
              <td>0.88</td>
              <td>0.89</td>
              <td>0.89</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>LightGBM</td>
              <td>0.82</td>
              <td>0.77</td>
              <td>0.78</td>
              <td>0.77</td>
            </tr>
          </tbody>
        </table>
        <table-wrap-foot>
          <fn id="table4fn1">
            <p><sup>a</sup>The models under MIMIC-III were trained on the MIMIC-III data set and those under eICU were trained on the eICU data set.</p>
          </fn>
          <fn id="table4fn2">
            <p><sup>b</sup>LSTM: long short-term memory.</p>
          </fn>
          <fn id="table4fn3">
            <p><sup>c</sup>CNN: convolutional neural network.</p>
          </fn>
          <fn id="table4fn4">
            <p><sup>d</sup>M-CNN: multiple convolutional neural network.</p>
          </fn>
          <fn id="table4fn5">
            <p><sup>e</sup>TCN: temporal convolutional network.</p>
          </fn>
          <fn id="table4fn6">
            <p><sup>f</sup>LightGBM: gradient boosting–based method.</p>
          </fn>
        </table-wrap-foot>
      </table-wrap>
      <table-wrap position="float" id="table5">
        <label>Table 5</label>
        <caption>
          <p>Inference time for the different models.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="500"/>
          <col width="500"/>
          <thead>
            <tr valign="top">
              <td>Model name</td>
              <td>Average inference time/batch</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td>LSTM<sup>a</sup></td>
              <td>654 ms</td>
            </tr>
            <tr valign="top">
              <td>CNN<sup>b</sup></td>
              <td>220 ms</td>
            </tr>
            <tr valign="top">
              <td>M-CNN<sup>c</sup></td>
              <td>285 ms</td>
            </tr>
            <tr valign="top">
              <td>TCN<sup>d</sup></td>
              <td>854 ms</td>
            </tr>
            <tr valign="top">
              <td>Transformer</td>
              <td>598 ms</td>
            </tr>
            <tr valign="top">
              <td>LightGBM<sup>e</sup></td>
              <td>121 ms</td>
            </tr>
          </tbody>
        </table>
        <table-wrap-foot>
          <fn id="table5fn1">
            <p><sup>a</sup>LSTM: long short-term memory.</p>
          </fn>
          <fn id="table5fn2">
            <p><sup>b</sup>CNN: convolutional neural network.</p>
          </fn>
          <fn id="table5fn3">
            <p><sup>c</sup>M-CNN: multiple convolutional neural network.</p>
          </fn>
          <fn id="table5fn4">
            <p><sup>d</sup>TCN: temporal convolutional network.</p>
          </fn>
          <fn id="table5fn5">
            <p><sup>e</sup>LightGBM: gradient boosting–based method.</p>
          </fn>
        </table-wrap-foot>
      </table-wrap>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <p>In this work, we developed an end-to-end system to extract and process lab results from EHRs and applied various machine learning algorithms to determine which lab values will be out of range in the next 4 hours with satisfactory results. This enables medical staff to focus on these lab values that can lead to improvements in overall patient diagnosis and treatment. Additionally, it can help reduce the time and cost wasted on irrelevant lab tests. The following steps were taken to reach this goal: First, we used SQL queries to extract the relevant patient data following our cohort from MIMIC-III and eICU data sets. Second, we used the time-windowed sample-and-hold method alongside k-nearest neighbor imputation with mean imputation and singular value decomposition to fill missing values. Moreover, we used the Tukey range test to detect anomalies and delete them. Third, we experimented with non-DL methods like LightGBM as well as 4 DL algorithms for time series classification. The DL-based method stacks models through mapping and processing functions between the models, using gradient descent or momentum methods to optimize fit. Gradient boosting methods like LightGBM iteratively fit models to error terms and average results within a generalized linear modeling framework using base learner models at each iteration, introducing a penalty term into the base learner models. Finally, we trained and tested our algorithms on 2 of the well-known EHR data sets, MIMIC-III and eICU. Cross-validating our algorithms on these 2 data sets ensures not only a broader performance comparison, but also helps analyze how far the different algorithms can generalize on new unseen data.</p>
      <p>A deeper analysis of the training results of the different DL-based models (<xref rid="figure8" ref-type="fig">Figures 8</xref>, 9 and 10) revealed that the M-CNN model trained on the eICU data set yielded better results at the end of the training than any other model. Additionally, we can see that the performance of both the TCN and transformer model improved significantly when trained on more data (eICU data set). This can be better understood from the results in <xref ref-type="table" rid="table3">Tables 3</xref> and <xref ref-type="table" rid="table4">4</xref>. First, the models trained on the eICU data set generalized better on data that they had not seen before from both the data sets. This is because the models had more data to train on, so they could see more variations and cases that they learned. On the other hand, the models trained on the MIMIC-III data set (43% the size of eICU training samples) performed well on the testing samples from MIMIC-III but performed much worse on the testing samples from eICU. Second, the M-CNN model performed the best in terms of almost all the evaluation metrics in both training methods. CNN models perform well on many sequence modeling tasks, often outperforming RNN architectures like LSTM or GRU. Additionally, CNN-based models have the least number of trainable parameters out of the different DL-based methods and occupy the least memory, making them perform better on data sets with small amounts of training data. On the other hand, standard CNNs can only work with fixed-size inputs and usually focus on data elements that are in immediate proximity due to their static convolutional filter size. However, combining multiple CNN models helps increase the accuracy further by applying convolutions with multiple filter sizes and combining the outputs to give a more robust prediction. 
Moreover, in our case, we chose a static, relatively short input sequence length, thus mitigating the issue of long, variable length sequences. In case of long, variable length input sequences, a TCN will be a better candidate. A TCN employs techniques like multiple layers of dilated convolutions and padding of input sequences to handle different sequence lengths and detect dependencies between items that are not next to each other but are positioned on different places in a sequence. Furthermore, more complicated architectures like transformers and TCNs with many more trainable parameters would perform better if they had access to more data, which is often an issue in the medical field because of the scarcity of available training data. Therefore, M-CNN architectures are desirable for modeling medical time series data with static lengths and relatively short lengths like lab values requiring relatively smaller training data sets. Moreover, the M-CNN architecture can generalize well on unseen data when trained well, considering integrated measures for reducing overfitting during model training. An interesting fact is that despite not outperforming the M-CNN model, LightGBM performed as well as (and sometimes better than) some other DL-based approaches while requiring much less training time. Non-DL–based approaches can model problems with much less training data but require hand-crafted features and are very sensitive to outliers and variation in data. Further, removing seasonality is often needed when dealing with time series data. Finally, we can see that the LightGBM model is the fastest in terms of the inference time according to <xref ref-type="table" rid="table5">Table 5</xref>, followed by the CNN model, which is the fastest among the DL-based models. The M-CNN model, despite outperforming the regular CNN model, is 29% slower in terms of the inference time, which is expected as the model has more parameters.</p>
      <p>Overall, our comprehensive analysis shows the advantage of using DL models for classifying future abnormalities in lab values for patients in the ICU. Although we tested our algorithms on 2 of the most widely used EHR data sets, further testing is needed to assess the performance of the full pipeline on other EHRs, including the preprocessing steps and how well the tuned hyperparameters of the machine learning models will generalize. Nevertheless, we believe this study can help other researchers trying to use machine learning in modeling medical time series problems.</p>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Statistical properties of the input features from MIMIC-III and eICU data sets.</p>
        <media xlink:href="medinform_v10i8e37658_app1.pdf" xlink:title="PDF File  (Adobe PDF File), 84 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Details of the models used.</p>
        <media xlink:href="medinform_v10i8e37658_app2.pdf" xlink:title="PDF File  (Adobe PDF File), 648 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">CNN</term>
          <def>
            <p>convolutional neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">DL</term>
          <def>
            <p>deep learning</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">EHR</term>
          <def>
            <p>electronic health record</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">GRU</term>
          <def>
            <p>gated recurrent unit</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">ICU</term>
          <def>
            <p>intensive care unit</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">LSTM</term>
          <def>
            <p>long short-term memory</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">M-CNN</term>
          <def>
            <p>multiple convolutional neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">RNN</term>
          <def>
            <p>recurrent neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">RWTH</term>
          <def>
            <p>Rheinisch Westfälische Technische Hochschule</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">SQL</term>
          <def>
            <p>Structured Query Language</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">TCN</term>
          <def>
            <p>temporal convolutional network</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This work is funded by the European Institute of Innovation &#38; Technology (grant EIT-Health 19549). The funding institution of the study had no role in study design, data collection, data analysis, data interpretation, or writing of the paper.</p>
    </ack>
    <fn-group>
      <fn fn-type="con">
        <p>AA, AH, AP, and LM conceived the idea. AA and AH performed data extraction. AS, GD, and GM provided methodological inputs. LF worked on the data cohort and SQL queries. AS and AH reviewed the mathematical analysis. AA and AH worked on the figures, tables, and manuscript writing. LM had full access to all data in the study. All authors read and approved the final submitted manuscript.</p>
      </fn>
      <fn fn-type="conflict">
        <p>AP, GM, LM, AS, and GD are cofounders of Clinomic GmbH. AP and LM are chief executive officers of Clinomic GmbH. GM is the senior medical advisor, and GD and AS are scientific advisors in Clinomic GmbH. All remaining authors declare that they have no conflict of interests.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ayad</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zamani</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Schmeink</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Dartmann</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Design and implementation of a hybrid anomaly detection system for IoT</article-title>
          <source>2019 Sixth International Conference on Internet of Things: Systems, Management and Security (IOTSMS)</source>
          <year>2019</year>
          <month>10</month>
          <conf-name>Sixth International Conference on Internet of Things: Systems, Management and Security</conf-name>
          <conf-date>October 22-25, 2019</conf-date>
          <conf-loc>Granada, Spain</conf-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>1</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1109/IOTSMS48152.2019.8939206</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sheikhalishahi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Miotto</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Dudley</surname>
              <given-names>JT</given-names>
            </name>
            <name name-style="western">
              <surname>Lavelli</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rinaldi</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Osmani</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Natural language processing of clinical notes on chronic diseases: systematic review</article-title>
          <source>JMIR Med Inform</source>
          <year>2019</year>
          <month>04</month>
          <volume>7</volume>
          <issue>2</issue>
          <fpage>e12239</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://medinform.jmir.org/2019/2/e12239/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/12239</pub-id>
          <pub-id pub-id-type="medline">31066697</pub-id>
          <pub-id pub-id-type="pii">v7i2e12239</pub-id>
          <pub-id pub-id-type="pmcid">PMC6528438</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Isensee</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Jaeger</surname>
              <given-names>PF</given-names>
            </name>
            <name name-style="western">
              <surname>Kohl</surname>
              <given-names>SAA</given-names>
            </name>
            <name name-style="western">
              <surname>Petersen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Maier-Hein</surname>
              <given-names>KH</given-names>
            </name>
          </person-group>
          <article-title>nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation</article-title>
          <source>Nat Methods</source>
          <year>2021</year>
          <month>02</month>
          <volume>18</volume>
          <issue>2</issue>
          <fpage>203</fpage>
          <lpage>211</lpage>
          <pub-id pub-id-type="doi">10.1038/s41592-020-01008-z</pub-id>
          <pub-id pub-id-type="medline">33288961</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41592-020-01008-z</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Zohren</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Time-series forecasting with deep learning: a survey</article-title>
          <source>Philos Trans A Math Phys Eng Sci</source>
          <year>2021</year>
          <month>04</month>
          <volume>379</volume>
          <issue>2194</issue>
          <fpage>20200209</fpage>
          <pub-id pub-id-type="doi">10.1098/rsta.2020.0209</pub-id>
          <pub-id pub-id-type="medline">33583273</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>HQ</given-names>
            </name>
            <name name-style="western">
              <surname>Lam</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Le</surname>
              <given-names>LT</given-names>
            </name>
            <name name-style="western">
              <surname>Pham</surname>
              <given-names>HH</given-names>
            </name>
            <name name-style="western">
              <surname>Tran</surname>
              <given-names>DQ</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>DB</given-names>
            </name>
            <name name-style="western">
              <surname>Le</surname>
              <given-names>DD</given-names>
            </name>
            <name name-style="western">
              <surname>Pham</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Tong</surname>
              <given-names>HTT</given-names>
            </name>
            <name name-style="western">
              <surname>Dinh</surname>
              <given-names>DH</given-names>
            </name>
            <name name-style="western">
              <surname>Do</surname>
              <given-names>CD</given-names>
            </name>
            <name name-style="western">
              <surname>Doan</surname>
              <given-names>LT</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>CN</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>BT</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>QV</given-names>
            </name>
            <name name-style="western">
              <surname>Hoang</surname>
              <given-names>AD</given-names>
            </name>
            <name name-style="western">
              <surname>Phan</surname>
              <given-names>HN</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>AT</given-names>
            </name>
            <name name-style="western">
              <surname>Ho</surname>
              <given-names>PH</given-names>
            </name>
            <name name-style="western">
              <surname>Ngo</surname>
              <given-names>DT</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>NT</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>NT</given-names>
            </name>
            <name name-style="western">
              <surname>Dao</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Vu</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>VinDr-CXR: an open dataset of chest X-rays with radiologist's annotations</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on Dec 30, 2020</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2012.15029"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2012.15029</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Johnson</surname>
              <given-names>AEW</given-names>
            </name>
            <name name-style="western">
              <surname>Pollard</surname>
              <given-names>TJ</given-names>
            </name>
            <name name-style="western">
              <surname>Shen</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Lehman</surname>
              <given-names>LH</given-names>
            </name>
            <name name-style="western">
              <surname>Feng</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ghassemi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Moody</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Szolovits</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Celi</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Mark</surname>
              <given-names>RG</given-names>
            </name>
          </person-group>
          <article-title>MIMIC-III, a freely accessible critical care database</article-title>
          <source>Sci Data</source>
          <year>2016</year>
          <month>05</month>
          <volume>3</volume>
          <fpage>160035</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/sdata.2016.35"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/sdata.2016.35</pub-id>
          <pub-id pub-id-type="medline">27219127</pub-id>
          <pub-id pub-id-type="pii">sdata201635</pub-id>
          <pub-id pub-id-type="pmcid">PMC4878278</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pollard</surname>
              <given-names>TJ</given-names>
            </name>
            <name name-style="western">
              <surname>Johnson</surname>
              <given-names>AEW</given-names>
            </name>
            <name name-style="western">
              <surname>Raffa</surname>
              <given-names>JD</given-names>
            </name>
            <name name-style="western">
              <surname>Celi</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Mark</surname>
              <given-names>RG</given-names>
            </name>
            <name name-style="western">
              <surname>Badawi</surname>
              <given-names>O</given-names>
            </name>
          </person-group>
          <article-title>The eICU Collaborative Research Database, a freely available multi-center database for critical care research</article-title>
          <source>Sci Data</source>
          <year>2018</year>
          <month>09</month>
          <volume>5</volume>
          <fpage>180178</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/sdata.2018.178"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/sdata.2018.178</pub-id>
          <pub-id pub-id-type="medline">30204154</pub-id>
          <pub-id pub-id-type="pii">sdata2018178</pub-id>
          <pub-id pub-id-type="pmcid">PMC6132188</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kong</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Based on improved deep convolutional neural network model pneumonia image classification</article-title>
          <source>PLoS One</source>
          <year>2021</year>
          <month>11</month>
          <volume>16</volume>
          <issue>11</issue>
          <fpage>e0258804</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0258804"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0258804</pub-id>
          <pub-id pub-id-type="medline">34735483</pub-id>
          <pub-id pub-id-type="pii">PONE-D-21-24030</pub-id>
          <pub-id pub-id-type="pmcid">PMC8568342</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Daghistani</surname>
              <given-names>TA</given-names>
            </name>
            <name name-style="western">
              <surname>Elshawi</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Sakr</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmed</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Thwayee</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Mallah</surname>
              <given-names>MH</given-names>
            </name>
          </person-group>
          <article-title>Predictors of in-hospital length of stay among cardiac patients: a machine learning approach</article-title>
          <source>Int J Cardiol</source>
          <year>2019</year>
          <month>08</month>
          <volume>288</volume>
          <fpage>140</fpage>
          <lpage>147</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ijcard.2019.01.046</pub-id>
          <pub-id pub-id-type="medline">30685103</pub-id>
          <pub-id pub-id-type="pii">S0167-5273(18)34602-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Perng</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kao</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Kung</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Hung</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lai</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Su</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Mortality prediction of septic patients in the emergency department based on machine learning</article-title>
          <source>J Clin Med</source>
          <year>2019</year>
          <month>11</month>
          <volume>8</volume>
          <issue>11</issue>
          <fpage>1906</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=jcm8111906"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/jcm8111906</pub-id>
          <pub-id pub-id-type="medline">31703390</pub-id>
          <pub-id pub-id-type="pii">jcm8111906</pub-id>
          <pub-id pub-id-type="pmcid">PMC6912277</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Frassica</surname>
              <given-names>JJ</given-names>
            </name>
          </person-group>
          <article-title>Frequency of laboratory test utilization in the intensive care unit and its implications for large-scale data collection efforts</article-title>
          <source>J Am Med Inform Assoc</source>
          <year>2005</year>
          <month>03</month>
          <volume>12</volume>
          <issue>2</issue>
          <fpage>229</fpage>
          <lpage>233</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/15561793"/>
          </comment>
          <pub-id pub-id-type="doi">10.1197/jamia.M1604</pub-id>
          <pub-id pub-id-type="medline">15561793</pub-id>
          <pub-id pub-id-type="pii">M1604</pub-id>
          <pub-id pub-id-type="pmcid">PMC551555</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kiss</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gede</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Hegyi</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Németh</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Földi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Dembrovszky</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Nagy</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Juhász</surname>
              <given-names>MF</given-names>
            </name>
            <name name-style="western">
              <surname>Ocskay</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Zádori</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Molnár</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Párniczky</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hegyi</surname>
              <given-names>PJ</given-names>
            </name>
            <name name-style="western">
              <surname>Szakács</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Pár</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Erőss</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Alizadeh</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Early changes in laboratory parameters are predictors of mortality and ICU admission in patients with COVID-19: a systematic review and meta-analysis</article-title>
          <source>Med Microbiol Immunol</source>
          <year>2021</year>
          <month>02</month>
          <volume>210</volume>
          <issue>1</issue>
          <fpage>33</fpage>
          <lpage>47</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/33219397"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s00430-020-00696-w</pub-id>
          <pub-id pub-id-type="medline">33219397</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00430-020-00696-w</pub-id>
          <pub-id pub-id-type="pmcid">PMC7679241</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Butler</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Monsalve</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Thomas</surname>
              <given-names>GW</given-names>
            </name>
            <name name-style="western">
              <surname>Herman</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Segre</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Polgreen</surname>
              <given-names>PM</given-names>
            </name>
            <name name-style="western">
              <surname>Suneja</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Estimating time physicians and other health care workers spend with patients in an intensive care unit using a sensor network</article-title>
          <source>Am J Med</source>
          <year>2018</year>
          <month>08</month>
          <volume>131</volume>
          <issue>8</issue>
          <fpage>972.e9</fpage>
          <lpage>972.e15</lpage>
          <pub-id pub-id-type="doi">10.1016/j.amjmed.2018.03.015</pub-id>
          <pub-id pub-id-type="medline">29649458</pub-id>
          <pub-id pub-id-type="pii">S0002-9343(18)30296-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="web">
          <article-title>Clinical artificial intelligence improving healthcare</article-title>
          <source>eit Health</source>
          <access-date>2022-04-20</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://eithealth.eu/product-service/clinical-artificial-intelligence-improving-healthcare">https://eithealth.eu/product-service/clinical-artificial-intelligence-improving-healthcare</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Song</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Rajan</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Thiagarajan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Spanias</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Attend and diagnose: clinical time series analysis using attention models</article-title>
          <source>Proceedings of the AAAI Conference on Artificial Intelligence</source>
          <year>2018</year>
          <month>04</month>
          <conf-name>Thirty-Second AAAI Conference on Artificial Intelligence</conf-name>
          <conf-date>February 2-7, 2018</conf-date>
          <conf-loc>New Orleans, LA, United States</conf-loc>
          <publisher-loc>Palo Alto, California, United States</publisher-loc>
          <publisher-name>AAAI Press</publisher-name>
          <pub-id pub-id-type="doi">10.1609/aaai.v32i1.11635</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Harutyunyan</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Khachatrian</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Kale</surname>
              <given-names>DC</given-names>
            </name>
            <name name-style="western">
              <surname>Ver Steeg</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Galstyan</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Multitask learning and benchmarking with clinical time series data</article-title>
          <source>Sci Data</source>
          <year>2019</year>
          <month>06</month>
          <volume>6</volume>
          <issue>1</issue>
          <fpage>96</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41597-019-0103-9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41597-019-0103-9</pub-id>
          <pub-id pub-id-type="medline">31209213</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41597-019-0103-9</pub-id>
          <pub-id pub-id-type="pmcid">PMC6572845</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shukla</surname>
              <given-names>SN</given-names>
            </name>
            <name name-style="western">
              <surname>Marlin</surname>
              <given-names>BM</given-names>
            </name>
          </person-group>
          <article-title>A survey on principles, models and methods for learning from irregularly sampled time series</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on Nov 30, 2020</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2012.00168"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2012.00168</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Weerakody</surname>
              <given-names>PB</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>KW</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Ela</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>A review of irregular time series data handling with gated recurrent neural networks</article-title>
          <source>Neurocomputing</source>
          <year>2021</year>
          <month>06</month>
          <volume>441</volume>
          <fpage>161</fpage>
          <lpage>178</lpage>
          <pub-id pub-id-type="doi">10.1016/j.neucom.2021.02.046</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Moor</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Horn</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Rieck</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Roqueiro</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Borgwardt</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Early recognition of sepsis with Gaussian process temporal convolutional networks and dynamic time warping</article-title>
          <source>Proceedings of the 4th Machine Learning for Healthcare Conference, PMLR</source>
          <year>2019</year>
          <month>08</month>
          <volume>106</volume>
          <fpage>2</fpage>
          <lpage>26</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://proceedings.mlr.press/v106/moor19a.html"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tyler</surname>
              <given-names>PD</given-names>
            </name>
            <name name-style="western">
              <surname>Du</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Feng</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Bai</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Horowitz</surname>
              <given-names>GL</given-names>
            </name>
            <name name-style="western">
              <surname>Stone</surname>
              <given-names>DJ</given-names>
            </name>
            <name name-style="western">
              <surname>Celi</surname>
              <given-names>LA</given-names>
            </name>
          </person-group>
          <article-title>Assessment of intensive care unit laboratory values that differ from reference ranges and association with patient mortality and length of stay</article-title>
          <source>JAMA Netw Open</source>
          <year>2018</year>
          <month>11</month>
          <volume>1</volume>
          <issue>7</issue>
          <fpage>e184521</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://jamanetwork.com/journals/jamanetworkopen/fullarticle/10.1001/jamanetworkopen.2018.4521"/>
          </comment>
          <pub-id pub-id-type="doi">10.1001/jamanetworkopen.2018.4521</pub-id>
          <pub-id pub-id-type="medline">30646358</pub-id>
          <pub-id pub-id-type="pii">2713040</pub-id>
          <pub-id pub-id-type="pmcid">PMC6324400</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mikhaeil</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Day</surname>
              <given-names>AG</given-names>
            </name>
            <name name-style="western">
              <surname>Ilan</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Non-essential blood tests in the intensive care unit: a prospective observational study</article-title>
          <source>Can J Anaesth</source>
          <year>2017</year>
          <month>03</month>
          <volume>64</volume>
          <issue>3</issue>
          <fpage>290</fpage>
          <lpage>295</lpage>
          <pub-id pub-id-type="doi">10.1007/s12630-016-0793-9</pub-id>
          <pub-id pub-id-type="medline">28000153</pub-id>
          <pub-id pub-id-type="pii">10.1007/s12630-016-0793-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Peine</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hallawa</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bickenbach</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Dartmann</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Fazlic</surname>
              <given-names>LB</given-names>
            </name>
            <name name-style="western">
              <surname>Schmeink</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ascheid</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Thiemermann</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Schuppert</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kindle</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Celi</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Marx</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Martin</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Development and validation of a reinforcement learning algorithm to dynamically optimize mechanical ventilation in critical care</article-title>
          <source>NPJ Digit Med</source>
          <year>2021</year>
          <month>02</month>
          <volume>4</volume>
          <issue>1</issue>
          <fpage>32</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-021-00388-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-021-00388-6</pub-id>
          <pub-id pub-id-type="medline">33608661</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41746-021-00388-6</pub-id>
          <pub-id pub-id-type="pmcid">PMC7895944</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="web">
          <article-title>ACP Internal Medicine Meeting</article-title>
          <source>Reference ranges</source>
          <access-date>2021-03-05</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://annualmeeting.acponline.org/educational-program/handouts/reference-ranges-table">https://annualmeeting.acponline.org/educational-program/handouts/reference-ranges-table</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ayad</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hallawa</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Schmeink</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Lab values abnormality detection (AI-LAD)</article-title>
          <source>a-ayad / AI_LAD</source>
          <year>2022</year>
          <access-date>2022-01-25</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://github.com/a-ayad/AI_LAD">https://github.com/a-ayad/AI_LAD</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mitra</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <source>Digital Signal Processing: A Computer-Based Approach</source>
          <year>2010</year>
          <publisher-loc>Europe</publisher-loc>
          <publisher-name>McGraw-Hill Education</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Salgado</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Azevedo</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Proença</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Vieira</surname>
              <given-names>SM</given-names>
            </name>
          </person-group>
          <article-title>Missing data</article-title>
          <source>Secondary Analysis of Electronic Health Records</source>
          <year>2016</year>
          <month>09</month>
          <publisher-loc>Cham, Switzerland</publisher-loc>
          <publisher-name>Springer</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shi</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Hao</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>A synchronous prediction model based on multi-channel CNN with moving window for coal and electricity consumption in cement calcination process</article-title>
          <source>Sensors (Basel)</source>
          <year>2021</year>
          <month>06</month>
          <volume>21</volume>
          <issue>13</issue>
          <fpage>4284</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s21134284"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s21134284</pub-id>
          <pub-id pub-id-type="medline">34201548</pub-id>
          <pub-id pub-id-type="pii">s21134284</pub-id>
          <pub-id pub-id-type="pmcid">PMC8271547</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bangyal</surname>
              <given-names>WH</given-names>
            </name>
            <name name-style="western">
              <surname>Qasim</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Rehman</surname>
              <given-names>NU</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmad</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Dar</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Rukhsar</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Aman</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmad</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Detection of fake news text classification on COVID-19 using deep learning approaches</article-title>
          <source>Comput Math Methods Med</source>
          <year>2021</year>
          <month>11</month>
          <volume>2021</volume>
          <fpage>5514220</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1155/2021/5514220"/>
          </comment>
          <pub-id pub-id-type="doi">10.1155/2021/5514220</pub-id>
          <pub-id pub-id-type="medline">34819990</pub-id>
          <pub-id pub-id-type="pmcid">PMC8608495</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vaswani</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Shazeer</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Parmar</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Uszkoreit</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jones</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Gomez</surname>
              <given-names>AN</given-names>
            </name>
            <name name-style="western">
              <surname>Kaiser</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Polosukhin</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Attention is all you need</article-title>
          <source>Proceedings of the Advances in Neural Information Processing Systems</source>
          <year>2017</year>
          <conf-name>31st Conference on Neural Information Processing Systems (NIPS 2017)</conf-name>
          <conf-date>December 4-9, 2017</conf-date>
          <conf-loc>Long Beach, CA, United States</conf-loc>
          <publisher-name>Curran Associates, Inc</publisher-name>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://proceedings.neurips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kazemi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Goel</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Eghbali</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ramanan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sahota</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Thakur</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Brubaker</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Time2vec: learning a vector representation of time</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on Jul 11, 2019</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1907.05321"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lea</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Vidal</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Reiter</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hager</surname>
              <given-names>GD</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Hua</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Jégou</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Temporal convolutional networks: a unified approach to action segmentation</article-title>
          <source>Computer Vision – ECCV 2016 Workshops. ECCV 2016. Lecture Notes in Computer Science</source>
          <year>2016</year>
          <month>11</month>
          <publisher-loc>Cham, Switzerland</publisher-loc>
          <publisher-name>Springer</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ke</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Meng</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Finley</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>TY</given-names>
            </name>
          </person-group>
          <article-title>LightGBM: a highly efficient gradient boosting decision tree</article-title>
          <source>Proceedings of the Advances in Neural Information Processing Systems</source>
          <year>2017</year>
          <conf-name>31st Conference on Neural Information Processing Systems (NIPS 2017)</conf-name>
          <conf-date>December 4-9, 2017</conf-date>
          <conf-loc>Long Beach, CA, United States</conf-loc>
          <publisher-name>Curran Associates, Inc</publisher-name>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://proceedings.neurips.cc/paper/2017/file/6449f44a102fde848669bdd9eb6b76fa-Paper.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hochreiter</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Schmidhuber</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Long short-term memory</article-title>
          <source>Neural Comput</source>
          <year>1997</year>
          <month>11</month>
          <volume>9</volume>
          <issue>8</issue>
          <fpage>1735</fpage>
          <lpage>1780</lpage>
          <pub-id pub-id-type="doi">10.1162/neco.1997.9.8.1735</pub-id>
          <pub-id pub-id-type="medline">9377276</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Understanding the disharmony between dropout and batch normalization by variance shift</article-title>
          <source>Proceedings of the 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)</source>
          <year>2019</year>
          <month>06</month>
          <conf-name>2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name>
          <conf-date>June 15-20, 2019</conf-date>
          <conf-loc>Long Beach, CA, United States</conf-loc>
          <publisher-name>IEEE</publisher-name>
          <pub-id pub-id-type="doi">10.1109/CVPR.2019.00279</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Matsugu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mori</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Mitari</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Kaneda</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Subject independent facial expression recognition with robust face detection using a convolutional neural network</article-title>
          <source>Neural Netw</source>
          <year>2003</year>
          <month>06</month>
          <volume>16</volume>
          <issue>5-6</issue>
          <fpage>555</fpage>
          <lpage>559</lpage>
          <pub-id pub-id-type="doi">10.1016/S0893-6080(03)00115-1</pub-id>
          <pub-id pub-id-type="medline">12850007</pub-id>
          <pub-id pub-id-type="pii">S0893-6080(03)00115-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Xiao</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Breast cancer histopathological image classification using convolutional neural networks with small SE-ResNet module</article-title>
          <source>PLoS One</source>
          <year>2019</year>
          <month>03</month>
          <volume>14</volume>
          <issue>3</issue>
          <fpage>e0214587</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0214587"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0214587</pub-id>
          <pub-id pub-id-type="medline">30925170</pub-id>
          <pub-id pub-id-type="pii">PONE-D-18-28454</pub-id>
          <pub-id pub-id-type="pmcid">PMC6440620</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>CL-ACP: a parallel combination of CNN and LSTM anticancer peptide recognition model</article-title>
          <source>BMC Bioinformatics</source>
          <year>2021</year>
          <month>10</month>
          <volume>22</volume>
          <issue>1</issue>
          <fpage>512</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcbioinformatics.biomedcentral.com/articles/10.1186/s12859-021-04433-9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12859-021-04433-9</pub-id>
          <pub-id pub-id-type="medline">34670488</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12859-021-04433-9</pub-id>
          <pub-id pub-id-type="pmcid">PMC8527680</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Maas</surname>
              <given-names>AL</given-names>
            </name>
            <name name-style="western">
              <surname>Hannun</surname>
              <given-names>AY</given-names>
            </name>
            <name name-style="western">
              <surname>Ng</surname>
              <given-names>AY</given-names>
            </name>
          </person-group>
          <article-title>Rectifier nonlinearities improve neural network acoustic models</article-title>
          <source>Proceedings of the 30th International Conference on Machine Learning</source>
          <year>2013</year>
          <month>06</month>
          <conf-name>30th International Conference on Machine Learning</conf-name>
          <conf-date>June 17-19, 2013</conf-date>
          <conf-loc>Atlanta, Georgia, United States</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Mu</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Ranjan</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Zomaya</surname>
              <given-names>AY</given-names>
            </name>
          </person-group>
          <article-title>Temporal convolutional networks for the advance prediction of ENSO</article-title>
          <source>Sci Rep</source>
          <year>2020</year>
          <month>05</month>
          <volume>10</volume>
          <issue>1</issue>
          <fpage>8055</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-020-65070-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-020-65070-5</pub-id>
          <pub-id pub-id-type="medline">32415130</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-020-65070-5</pub-id>
          <pub-id pub-id-type="pmcid">PMC7229218</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="web">
          <article-title>Keras tuner library</article-title>
          <source>keras-team/keras-tuner</source>
          <year>2021</year>
          <access-date>2021-10-09</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://github.com/keras-team/keras-tuner">https://github.com/keras-team/keras-tuner</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Murphy</surname>
              <given-names>KP</given-names>
            </name>
          </person-group>
          <source>Machine Learning: A Probabilistic Perspective</source>
          <year>2013</year>
          <publisher-loc>Cambridge, MA</publisher-loc>
          <publisher-name>MIT Press</publisher-name>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
