<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMI</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Med Inform</journal-id>
      <journal-title>JMIR Medical Informatics</journal-title>
      <issn pub-type="epub">2291-9694</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v10i8e32319</article-id>
      <article-id pub-id-type="pmid">35947437</article-id>
      <article-id pub-id-type="doi">10.2196/32319</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Using the Diagnostic Odds Ratio to Select Patterns to Build an Interpretable Pattern-Based Classifier in a Clinical Domain: Multivariate Sequential Pattern Mining Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Lovis</surname>
            <given-names>Christian</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Hu</surname>
            <given-names>Dian</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Nuutinen</surname>
            <given-names>Mikko</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Arbabisarjou</surname>
            <given-names>Azizollah</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Casanova</surname>
            <given-names>Isidoro J</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>AIKE Research Team (INTICO)</institution>
            <institution>Computer Science Faculty</institution>
            <institution>University of Murcia</institution>
            <addr-line>Edificio 32, Campus de Espinardo</addr-line>
            <addr-line>Murcia, 30100</addr-line>
            <country>Spain</country>
            <phone>34 868887150</phone>
            <email>isidoroj@um.es</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-5651-3935</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Campos</surname>
            <given-names>Manuel</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-5233-3769</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Juarez</surname>
            <given-names>Jose M</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-1776-1992</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Gomariz</surname>
            <given-names>Antonio</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-7234-3331</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Lorente-Ros</surname>
            <given-names>Marta</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff5" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-3081-9952</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Lorente</surname>
            <given-names>Jose A</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff6" ref-type="aff">6</xref>
          <xref rid="aff7" ref-type="aff">7</xref>
          <xref rid="aff8" ref-type="aff">8</xref>
          <xref rid="aff9" ref-type="aff">9</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-7679-9187</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>AIKE Research Team (INTICO)</institution>
        <institution>Computer Science Faculty</institution>
        <institution>University of Murcia</institution>
        <addr-line>Murcia</addr-line>
        <country>Spain</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Murcian Bio-Health Institute (IMIB-Arrixaca)</institution>
        <addr-line>Murcia</addr-line>
        <country>Spain</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>CIBERFES Fragilidad y Envejecimiento Saludable</institution>
        <addr-line>Madrid</addr-line>
        <country>Spain</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Amazon Research</institution>
        <addr-line>Madrid</addr-line>
        <country>Spain</country>
      </aff>
      <aff id="aff5">
        <label>5</label>
        <institution>Department of Medicine</institution>
        <institution>Mount Sinai St Luke's-Roosevelt Hospital</institution>
        <institution>Icahn School of Medicine at Mount Sinai</institution>
        <addr-line>New York, NY</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff6">
        <label>6</label>
        <institution>Intensive Care Unit</institution>
        <institution>University Hospital of Getafe</institution>
        <addr-line>Getafe</addr-line>
        <country>Spain</country>
      </aff>
      <aff id="aff7">
        <label>7</label>
        <institution>School of Medicine</institution>
        <institution>European University of Madrid</institution>
        <addr-line>Madrid</addr-line>
        <country>Spain</country>
      </aff>
      <aff id="aff8">
        <label>8</label>
        <institution>CIBER de Enfermedades Respiratorias</institution>
        <institution>Instituto de Salud Carlos III</institution>
        <addr-line>Madrid</addr-line>
        <country>Spain</country>
      </aff>
      <aff id="aff9">
        <label>9</label>
        <institution>Department of Bioengineering</institution>
        <institution>Universidad Carlos III</institution>
        <addr-line>Madrid</addr-line>
        <country>Spain</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Isidoro J Casanova <email>isidoroj@um.es</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>8</month>
        <year>2022</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>10</day>
        <month>8</month>
        <year>2022</year>
      </pub-date>
      <volume>10</volume>
      <issue>8</issue>
      <elocation-id>e32319</elocation-id>
      <history>
        <date date-type="received">
          <day>22</day>
          <month>7</month>
          <year>2021</year>
        </date>
        <date date-type="rev-request">
          <day>2</day>
          <month>1</month>
          <year>2022</year>
        </date>
        <date date-type="rev-recd">
          <day>26</day>
          <month>2</month>
          <year>2022</year>
        </date>
        <date date-type="accepted">
          <day>27</day>
          <month>3</month>
          <year>2022</year>
        </date>
      </history>
      <copyright-statement>©Isidoro J Casanova, Manuel Campos, Jose M Juarez, Antonio Gomariz, Marta Lorente-Ros, Jose A Lorente. Originally published in JMIR Medical Informatics (https://medinform.jmir.org), 10.08.2022.</copyright-statement>
      <copyright-year>2022</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Informatics, is properly cited. The complete bibliographic information, a link to the original publication on https://medinform.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://medinform.jmir.org/2022/8/e32319" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>It is important to exploit all available data on patients in settings such as intensive care burn units (ICBUs), where several variables are recorded over time. It is possible to take advantage of the multivariate patterns that model the evolution of patients to predict their survival. However, pattern discovery algorithms generate a large number of patterns, of which only some are relevant for classification.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>We propose to use the diagnostic odds ratio (DOR) to select multivariate sequential patterns used in the classification in a clinical domain, rather than employing frequency properties.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We used data obtained from the ICBU at the University Hospital of Getafe, where 6 temporal variables for 465 patients were registered every day during 5 days, and to model the evolution of these clinical variables, we used multivariate sequential patterns by applying 2 different discretization methods for the continuous attributes. We compared 4 ways in which to employ the DOR for pattern selection: (1) we used it as a threshold to select patterns with a minimum DOR; (2) we selected patterns whose differential DORs are higher than a threshold with regard to their extensions; (3) we selected patterns whose DOR CIs do not overlap; and (4) we proposed the combination of threshold and nonoverlapping CIs to select the most discriminative patterns. As a baseline, we compared our proposals with Jumping Emerging Patterns, one of the most frequently used techniques for pattern selection that utilizes frequency properties.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>We have compared the number and length of the patterns eventually selected, classification performance, and pattern and model interpretability. We show that discretization has a great impact on the accuracy of the classification model, but that a trade-off must be found between classification accuracy and the physicians’ capacity to interpret the patterns obtained. We have also identified that the experiments combining threshold and nonoverlapping CIs (Option 4) obtained the fewest number of patterns but also with the smallest size, thus implying the loss of an acceptable accuracy with regard to clinician interpretation. The best classification model according to the trade-off is a JRIP classifier with only 5 patterns (20 items) that was built using unsupervised correlation preserving discretization and differential DOR in a beam search for the best pattern. It achieves a specificity of 56.32% and an area under the receiver operating characteristic curve of 0.767.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>A method for the classification of patients’ survival can benefit from the use of sequential patterns, as these patterns consider knowledge about the temporal evolution of the variables in the case of ICBU. We have proved that the DOR can be used in several ways, and that it is a suitable measure to select discriminative and interpretable quality patterns.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>sequential patterns</kwd>
        <kwd>survival classification</kwd>
        <kwd>diagnostic odds ratio</kwd>
        <kwd>burn units</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Overview</title>
        <p>Advances in the collection and storage of data have led to the emergence of complex temporal data sets, in which the data instances are traces of complex behavior characterized by time series of multiple variables.</p>
        <p>In the clinical domain, patients who have incurred severe burns are treated in intensive care burn units (ICBUs). The first 5 days are fundamental: there is a resuscitation phase during the first 2 days and a stabilization phase during the following 3 days, and the patient’s evolution (incomings, diuresis, fluid balance, pH, bicarbonate, base excess) is registered over this period. These variables are not considered in scores for mortality prediction and may play a relevant role in improving the current knowledge of the problem.</p>
        <p>Designing algorithms that are capable of learning patterns and classification models from such data is one of the most challenging topics in data mining research [<xref ref-type="bibr" rid="ref1">1</xref>]. One approach to deal with this problem is discovering patterns that are used as predictors in classification algorithms [<xref ref-type="bibr" rid="ref2">2</xref>].</p>
        <p>The number of patterns initially generated is usually very large, but only a few of these patterns are likely to be of interest to the domain expert that analyzes the data. There are several reasons for this: many of the patterns are either irrelevant or obvious, many patterns do not provide new knowledge regarding the domain, and many of them are similar or are included in others. Measures of the level of interest are, therefore, required to reduce the number of patterns, thus increasing the utility, usefulness, and relevance of the patterns discovered [<xref ref-type="bibr" rid="ref3">3</xref>]. Some of these interestingness measures are based on the statistical significance of discriminative patterns.</p>
        <p>In addition to traditional multidimensional analysis and data mining tasks, one interesting task is that of discovering notable changes and comparative differences. This leads to gradient mining and discriminant analysis [<xref ref-type="bibr" rid="ref4">4</xref>].</p>
        <p>Discriminative pattern mining is one of the most important techniques in data mining. This challenging task comprises a group of pattern mining techniques designed to discover a set of significant patterns that occur with disproportionate frequencies in different class-labeled data sets [<xref ref-type="bibr" rid="ref5">5</xref>]. Research on discriminative patterns evolves rapidly under several nonuniform definitions, such as contrast sets, emerging patterns, or subgroups. However, these definitions are actually equivalent because their target patterns can be used interchangeably with the same ability to capture the differences between distinct classes [<xref ref-type="bibr" rid="ref5">5</xref>].</p>
        <p>The exploration of discriminative patterns generally includes 2 aspects: frequency and statistical significance. On the one hand, the frequency of a pattern can be assessed by its support, which is defined as the percentage of transactions (in our case, patients) that this pattern contains. A pattern is frequent if its support value is higher than a given threshold.</p>
        <p>On the other hand, the statistical significance of discriminative patterns can be measured by using various statistic tests. A pattern is deemed significant if its significance value generated from a certain statistical measure could meet certain user-defined conditions, for example, no less (or more) than a given threshold. Any statistical measure that is capable of quantifying the differences between classes, such as the odds ratio, information gain, or chi-square, is generally applicable, and the choice of this measure will not typically affect the overall performance of the discriminative pattern discovery algorithms [<xref ref-type="bibr" rid="ref5">5</xref>].</p>
        <p>Many specific quantitative indicators of diagnostic test performance have been introduced into the clinical domain, such as sensitivity and specificity, positive and negative predictive values, chance-corrected measures of agreement, likelihood ratios or area under the receiver operating characteristic curve (AUC), among others. But there is a single indicator of diagnostic performance, denominated as the diagnostic odds ratio (DOR), which is closely linked to existing indicators, facilitates the formal meta-analysis of studies on diagnostic test performance, and is derived from logistic models [<xref ref-type="bibr" rid="ref6">6</xref>].</p>
        <p>We propose and compare 4 approaches in which the DOR is used as a statistical measure to select a reduced number of patterns, and we put forward the use of these patterns as predictors in a classification model. The calculation of the DOR for a pattern enables us to use a terminology that is closer to the language of clinicians, in which a pattern is considered to be a risk factor or to have a protection factor.</p>
        <p>The first approach consists of using the DOR as a minimum threshold with which to select patterns. In the second approach, we calculate the difference in the DOR of a sequential pattern with respect to its extensions, and we establish a threshold for this difference to reduce the number of patterns selected. One advantage of this approach is that it can be used as an early pruning within the pattern discovery algorithm. In the third place, we calculate a CI for the DOR, and use this CI to prune patterns that are not statistically different from their extension patterns. Finally, we combine the second and third approaches to select patterns with different properties.</p>
        <p>We have verified that these propositions provide acceptable results by building a model for the classification of patients’ survival using their daily evolution in an ICBU, employing multivariate sequential patterns. We have additionally compared the 4 approaches with the selection of patterns founded on classical frequency-based measures such as Jumping Emerging Patterns (JEPs).</p>
      </sec>
      <sec>
        <title>Background</title>
        <sec>
          <title>Sequential Pattern Mining</title>
          <p>A sequence database is based on ordered elements or events, recorded with or without a concrete notion of time. There are many applications involving sequence data, such as economic and sales forecasting, speech or audio signals, web click streams, or biological sequences. The mining of frequently occurring ordered events or subsequences as patterns was first introduced by Agrawal and Srikant [<xref ref-type="bibr" rid="ref7">7</xref>] and has become a significant challenge in data mining.</p>
          <p>The purpose of sequential pattern mining is to discover interesting subsequences in a sequence database, that is, sequential relationships between items that are of interest to the user. Various measures can be used to estimate how interesting a subsequence is. In the original sequential pattern mining problem, the support measure is used. The support (or absolute support) of a sequence <italic>s</italic> in a sequence database is defined as the number of sequences that contain <italic>s</italic>, and is denoted by <italic>sup</italic>(<italic>s</italic>).</p>
          <p>Sequential pattern mining is the task of finding all the frequent subsequences in a sequence database. A sequence <italic>s</italic> is said to be a frequent sequence or a sequential pattern if and only if <italic>sup</italic>(<italic>s</italic>)≥<italic>minsup</italic>, for a threshold <italic>minsup</italic> established by the user. The assumption is that frequent subsequences are of interest to the user.</p>
          <p>With regard to the algorithms employed to mine sequential patterns, there are 3 pioneer proposals: the GSP algorithm with the a priori strategy [<xref ref-type="bibr" rid="ref8">8</xref>]; the SPADE algorithm, an a priori–based sequential pattern mining algorithm that uses vertical data format [<xref ref-type="bibr" rid="ref9">9</xref>]; and PrefixSpan with the pattern growth strategy [<xref ref-type="bibr" rid="ref10">10</xref>]. A number of algorithms based on these 3 proposals have focused on improving their efficiency using different search strategies or data structures.</p>
          <p>The researchers refer the reader to [<xref ref-type="bibr" rid="ref11">11</xref>] for more general information about sequential pattern mining.</p>
        </sec>
        <sec>
          <title>Pattern and Sequence-Based Classification</title>
          <p>Classification rule mining attempts to discover a small set of rules in the database to form an accurate classifier.</p>
          <p>Initial approaches that combined pattern mining and classification models employed a strict stepwise approach, in which a set of patterns was computed once and those patterns were subsequently used in models. However, a large number of methods were later proposed, whose aim was to integrate pattern mining, feature selection, and model construction [<xref ref-type="bibr" rid="ref12">12</xref>].</p>
          <p>Some of these are Classification Based on Predictive Association Rules (CPAR), Classification Based on Multiple Association Rules (CMAR) [<xref ref-type="bibr" rid="ref12">12</xref>], Multi-class, Multi-label Associative Classification (MMAC), and Classification Based on Associations (CBA). Many experimental studies have shown that these integrated classification methods have a high potential to build more predictive and accurate classification systems than traditional classification methods such as decision trees [<xref ref-type="bibr" rid="ref13">13</xref>].</p>
          <p>The classification of sequence patterns is one of the most popular methodologies whose power has been demonstrated by multiple studies [<xref ref-type="bibr" rid="ref14">14</xref>], and which has a broad range of real-world applications. In medical informatics, the classification of electrocardiogram time series (the time series of heart rates) shows whether the data originates from a healthy person or from a patient with heart disease [<xref ref-type="bibr" rid="ref15">15</xref>], whereas in financial systems, transaction sequence data in a bank are classified for the purpose of fighting money laundering [<xref ref-type="bibr" rid="ref16">16</xref>].</p>
          <p>The sequence classification methods can be divided into 3 large categories [<xref ref-type="bibr" rid="ref14">14</xref>]:</p>
          <list list-type="bullet">
            <list-item>
              <p>The first category is that of feature-based classification, during which a sequence is transformed into a feature vector, after which conventional classification methods are applied. Feature selection plays an important role in this kind of method.</p>
            </list-item>
            <list-item>
              <p>The second category is sequence distance–based classification. The distance function that measures the similarity between sequences determines the quality of the classification in a significant manner.</p>
            </list-item>
            <list-item>
              <p>The third category is model-based classification, such as using the hidden Markov model and other statistical models to classify sequences.</p>
            </list-item>
          </list>
          <p>Conventional classification methods, such as neural networks or decision trees, are designed to classify feature vectors. One way to solve the problem of sequence classification is to transform a sequence into a vector of features by means of feature selections. Sequences can be classified by employing conventional classification methods, such as support vector machine and decision trees.</p>
          <p>Several researchers have worked toward building sequence classifiers based on frequent sequential patterns. Lesh et al [<xref ref-type="bibr" rid="ref17">17</xref>] proposed an algorithm for sequence classification using frequent patterns as features in the classifier. In their algorithm, subsequences are extracted and transformed into sets of features. After feature extraction, general classification algorithms such as support vector machine, naïve Bayes, or neural network can be used for classification. Their algorithm is the first attempt to combine classification and sequential pattern mining.</p>
          <p>Tseng and Lee [<xref ref-type="bibr" rid="ref18">18</xref>] proposed a Classify-By-Sequence (CBS) algorithm to combine sequential pattern mining and classification. Two algorithms, namely, “CBS Class” and “CBS All,” were proposed in their paper. In “CBS Class,” the database is divided into a number of subdatabases according to the class label of each instance. Sequential pattern mining is then implemented on each subdatabase. In “CBS All,” a conventional sequential pattern mining algorithm is applied on the whole data set. Weighted scoring is used in both algorithms.</p>
          <p>With regard to the ICBU, few studies have dealt with the problem of survival prediction using machine learning or intelligent data analysis [<xref ref-type="bibr" rid="ref19">19</xref>].</p>
        </sec>
        <sec>
          <title>Interestingness Measures for Sequence Classification</title>
          <p>In the original sequential pattern mining problem, the main measure used is support. The assumption is that frequent subsequences are of interest to the user.</p>
          <p>A first important limitation of the traditional sequential pattern mining problem is that a huge number of patterns may be generated by the algorithms, depending on how the <italic>minsup</italic> threshold is set and on the characteristics of the database [<xref ref-type="bibr" rid="ref11">11</xref>]. Finding too many patterns could hamper the effectiveness in some cases to which other measures could be better suited.</p>
          <p>Many other rule interestingness measures are already used in data mining, machine learning, and statistics. Geng and Hamilton [<xref ref-type="bibr" rid="ref20">20</xref>] have gathered together 9 different criteria that specify the interestingness of a pattern. These 9 criteria are conciseness, generality, reliability, peculiarity, diversity, novelty, surprisingness, utility, and actionability. These authors additionally classify these criteria into 3 main categories: objective, subjective, and semantics-based measures. Objective measures are those that depend only on raw data. Subjective measures are those that consider the users’ background knowledge in addition to data, and finally semantics-based measures are a special type of subjective measures that take into account the explanation and the semantics of a pattern, which are, like subjective measures, domain specific.</p>
          <p>In this paper, we focus on the probability-based objective measures used in the clinical domain. Some examples of objective rule interestingness measures that are often used in epidemiology as a statistical metric are presented in <xref ref-type="table" rid="table1">Table 1</xref>.</p>
          <table-wrap position="float" id="table1">
            <label>Table 1</label>
            <caption>
              <p>Usual clinical objective rule interestingness measures for rules in the form of A→c.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="500"/>
              <col width="500"/>
              <thead>
                <tr valign="top">
                  <td>Measure</td>
                  <td>Formula</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Support</td>
                  <td><italic>P</italic>(<italic>Ac</italic>)</td>
                </tr>
                <tr valign="top">
                  <td>Confidence</td>
                  <td><italic>P</italic>(<italic>c&#124;A</italic>)</td>
                </tr>
                <tr valign="top">
                  <td>Coverage</td>
                  <td><italic>P</italic>(<italic>A</italic>)</td>
                </tr>
                <tr valign="top">
                  <td>Prevalence</td>
                  <td><italic>P</italic>(<italic>c</italic>)</td>
                </tr>
                <tr valign="top">
                  <td>Specificity</td>
                  <td>
                    <inline-graphic xlink:href="medinform_v10i8e32319_fig1.png" xlink:type="simple" mimetype="image"/>
                  </td>
                </tr>
                <tr valign="top">
                  <td>Accuracy</td>
                  <td>
                    <inline-graphic xlink:href="medinform_v10i8e32319_fig2.png" xlink:type="simple" mimetype="image"/>
                  </td>
                </tr>
                <tr valign="top">
                  <td>Diagnostic odds ratio</td>
                  <td>
                    <graphic xlink:href="medinform_v10i8e32319_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
                  </td>
                </tr>
                <tr valign="top">
                  <td>Relative risk</td>
                  <td>
                    <graphic xlink:href="medinform_v10i8e32319_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
                  </td>
                </tr>
              </tbody>
            </table>
          </table-wrap>
          <p>Relative risk and the DOR are statistical metrics that are often used in epidemiological studies. They are consistent: a larger odds ratio leads to a larger relative risk, and vice versa. Under the rare disease assumption, the DOR approximates the relative risk [<xref ref-type="bibr" rid="ref21">21</xref>]. The DOR is usually used in case-control studies.</p>
          <p>Li et al [<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref22">22</xref>] used an epidemiological metric, relative risk, to measure pattern interestingness, and concluded that it is an optimal measure to find high-risk patterns. The proposed method was more efficient in covering the search space and produced a smaller number of rules. However, the number of rules in the output could still be too large for an easy interpretation. The authors applied the method to a real-world medical and pharmaceutical–linked data set and it revealed some patterns that are potentially useful in clinical practice.</p>
          <p>Most of the conventional frequent pattern–based classification algorithms follow 2 steps [<xref ref-type="bibr" rid="ref23">23</xref>]. The first step consists of mining a complete set of sequential patterns given a minimum support, while the second consists of selecting a number of discriminative patterns with which to build a classifier. In most cases, mining a complete set of sequential patterns in a large data set is extremely time-consuming, and the huge number of patterns discovered signifies that pattern selection and classifier building are also very time-consuming.</p>
          <p>In fact, the most important consideration in sequence classification is not that of finding the complete rule set, but rather that of discovering the most discriminative patterns. In this respect, more attention has recently been paid to discriminative frequent pattern discovery for effective classification.</p>
          <p>Heierman et al [<xref ref-type="bibr" rid="ref24">24</xref>] presented a new data mining technique based on the Minimum Description Length principle, which discovers interesting features in a time-ordered sequence. Petitjean et al [<xref ref-type="bibr" rid="ref25">25</xref>] introduced a method with which to exactly and efficiently identify the <italic>k</italic> most interesting patterns in a sequential database for which the difference between its observed and expected frequency is maximum: a measure denominated as leverage. Other authors focused on measures for the selection of patterns, such as the relative risk or a coverage measure [<xref ref-type="bibr" rid="ref26">26</xref>].</p>
          <p>In the clinical domain, univariate frequent episodes of Sequential Organ Failure Assessment (SOFA) subscores during the first days after admission were identified in Toma et al [<xref ref-type="bibr" rid="ref27">27</xref>]. The authors then selected a reduced number of patterns using Akaike’s information criterion to build a logistic regression model to predict the survivability of patients with multiorgan failure. Later, Toma et al [<xref ref-type="bibr" rid="ref28">28</xref>] showed that the use of univariate patterns as predictors is at least as effective as clinical scores.</p>
          <p>After mining JEPs, Ghosh [<xref ref-type="bibr" rid="ref29">29</xref>] used coupled hidden Markov learning models to build robust sequential pattern–based classifiers. This made it possible to predict hypotension risk, an acute hypotensive episode, or even a septic shock, with the measurements of the mean arterial pressure, the heart rate, and the respiratory rate.</p>
        </sec>
      </sec>
      <sec>
        <title>Survival Prediction in Intensive Care Burn Units</title>
        <p>ICBUs are specialized units in which the main pathologies treated are inhalation injuries and severe burns. Early mortality prediction after admission is essential before an aggressive or conservative therapy can be recommended. Severity scores are simple but useful tools for physicians when evaluating the state of the patient [<xref ref-type="bibr" rid="ref30">30</xref>]. Scoring systems aim to use the most predictive premorbid and injury factors to yield an expected likelihood of death for a given patient. Baux and Prognostic Burn Index scores provide a mortality rate by summing age and the percentage of total burn surface area, while the Abbreviated Burns Severity Index also considers gender and the presence of inhalation injuries.</p>
        <p>The evolution of other parameters during the resuscitation phase (first 2 days) and during the stabilization phase (3 following days) may, however, also be important. The initial evaluation and resuscitation of patients with large burns that require inpatient care can be guided only loosely by formulas and rules. The inherent inaccuracy of formulas requires the continuous reevaluation and adjustment of infusions based on resuscitation targets. Incomings, diuresis, fluid balance, acid-base balance (pH, bicarbonate, base excess), and others help define objectives and assess the evolution and treatment response.</p>
        <p>In the ICBU, a patient’s evolution is registered but not considered in scores for mortality prediction. In a previous paper [<xref ref-type="bibr" rid="ref31">31</xref>], we used emerging patterns with a knowledge-based temporal abstraction and then built classifiers of the survival of the patients with a high sensitivity and specificity. The results of the classification tests showed that our approach is comparable to the burn severity scores used currently by physicians.</p>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Sequential Patterns</title>
        <p>Let <italic>I</italic> = {<italic>i</italic><sub>1</sub>, <italic>i</italic><sub>2</sub>, ..., <italic>i<sub>k</sub></italic>} be a set of items. An itemset is a non-empty subset of <italic>I</italic>. A sequence <inline-graphic xlink:href="medinform_v10i8e32319_fig5.png" xlink:type="simple" mimetype="image"/> is an ordered list of itemsets <inline-graphic xlink:href="medinform_v10i8e32319_fig6.png" xlink:type="simple" mimetype="image"/> (also called elements or events). Items within an element are unordered and are, by convention, listed alphabetically. An item can occur at most once in an element of a sequence, but can occur multiple times in different elements of a sequence.</p>
        <p>The number of instances of items in a sequence is denominated as the length of the sequence. A sequence with a length <italic>k</italic> is called a k-sequence. For example, <inline-graphic xlink:href="medinform_v10i8e32319_fig7.png" xlink:type="simple" mimetype="image"/> is a sequence that consists of 7 distinct items {a, b, c, d, e, f, g} and 6 itemsets. The length of the sequence is 12 items.</p>
        <p>Each itemset in a sequence represents the set of events that occur at the same time (same timestamp). A different itemset appears at a different time.</p>
        <p>Sequence <inline-graphic xlink:href="medinform_v10i8e32319_fig8.png" xlink:type="simple" mimetype="image"/> is a subsequence of sequence <inline-graphic xlink:href="medinform_v10i8e32319_fig9.png" xlink:type="simple" mimetype="image"/> (or <italic>β</italic> is a super-sequence of the sequence <italic>α</italic>), denoted as <inline-graphic xlink:href="medinform_v10i8e32319_fig10.png" xlink:type="simple" mimetype="image"/>, if there exist integers <italic>i</italic><sub>1</sub> &#60; <italic>i</italic><sub>2</sub> &#60; … &#60; <italic>i<sub>n</sub></italic> such that <inline-graphic xlink:href="medinform_v10i8e32319_fig11.png" xlink:type="simple" mimetype="image"/>. For example, <inline-graphic xlink:href="medinform_v10i8e32319_fig12.png" xlink:type="simple" mimetype="image"/> is a subsequence of <italic>s</italic>.</p>
        <p>The temporal representation of the patterns is principally carried out using time point representation or time interval representation.</p>
        <p>In the time interval representation, there are different ways in which to relate intervals to each other, of which the best known is Allen’s interval algebra [<xref ref-type="bibr" rid="ref32">32</xref>] or the Time Series Knowledge Representation. In Allen’s interval algebra, there are 13 relations that configure a very expressive language, thus making the pattern representation and the tasks related to temporal reasoning much more complicated.</p>
        <p>Time point–based data are a special case of the time interval–based data, in which both the beginning and the end points occur at the same time (for each interval) and the relations between these points become simpler (before, equals or co-occurs, and after), usually denoted as (&#60;, =, &#62;). Furthermore, because the “after” operator (&#62;) is the inverse of the “before” relation (&#60;), if we always consider a relation from the point that occurs first, it is not necessary to use the “after” relation. For instance, if we have A&#62;B, we will instead say B&#60;A.</p>
        <p>It is, therefore, possible to define patterns or sequences with only these 2 relations (&#60;, =). Two patterns <italic>a</italic> and <italic>b</italic> are exactly equal if their points are exactly the same and they have exactly the same relations in the same positions, that is, <inline-graphic xlink:href="medinform_v10i8e32319_fig13.png" xlink:type="simple" mimetype="image"/> and <inline-graphic xlink:href="medinform_v10i8e32319_fig14.png" xlink:type="simple" mimetype="image"/>.</p>
        <p>We have used the FaSPIP algorithm [<xref ref-type="bibr" rid="ref33">33</xref>] to discover multivariate sequential patterns. FaSPIP is based on the equivalence classes strategy and is able to mine both points and intervals. Moreover, FaSPIP uses a new candidate generation algorithm based on boundary points and efficient methods to avoid the generation of useless candidates and to check their frequency.</p>
        <p>In candidate generation, FaSPIP distinguishes between 2 operations to extend a sequence with an item, thus creating a new sequence: Sequence extensions (S-extensions), when the frequent points take place after the last item in the pattern, and Itemset extensions (I-extensions), when the points take place at the same time as the last item in the pattern. For instance, given the sequence <inline-graphic xlink:href="medinform_v10i8e32319_fig15.png" xlink:type="simple" mimetype="image"/> and a point <inline-graphic xlink:href="medinform_v10i8e32319_fig16.png" xlink:type="simple" mimetype="image"/>, the sequence <inline-graphic xlink:href="medinform_v10i8e32319_fig17.png" xlink:type="simple" mimetype="image"/> is an S-extension and <inline-graphic xlink:href="medinform_v10i8e32319_fig18.png" xlink:type="simple" mimetype="image"/> is an I-extension.</p>
      </sec>
      <sec>
        <title>Emerging Patterns</title>
        <p>The classical approach employed for pattern selection is based on the frequency of the patterns. Emerging patterns (EPs) or contrast sets are a type of knowledge pattern that describes significant changes (differences or trends) between 2 classes of data [<xref ref-type="bibr" rid="ref34">34</xref>]. EPs are sets of item conjunctions of attribute values whose frequency changes significantly from one data set to another. The problem of mining EPs can be expressed as follows: given 2 classes of data and a growth rate threshold, find all patterns (itemsets) whose growth rates—the ratio of their frequency between the 2 classes—are larger than the threshold [<xref ref-type="bibr" rid="ref3">3</xref>].</p>
        <p>Like other rules or patterns composed of conjunctive combinations of elements, EPs can be easily understood and used directly by clinicians.</p>
        <p>Furthermore, the concept of JEPs [<xref ref-type="bibr" rid="ref35">35</xref>] has been proposed to describe those discriminating features that occur only in the positive training instances but do not occur in the negative class at all. The most frequently appearing JEPs have been used to build accurate classifiers [<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref37">37</xref>].</p>
      </sec>
      <sec>
        <title>Diagnostic Odds Ratio and CI</title>
        <p>Clinicians must rely on the correct interpretation of diagnostic data in a variety of clinical environments. A 2×2 table is an essential tool to present the data regarding epidemiological studies for diagnostic test evaluation (<xref ref-type="table" rid="table2">Table 2</xref>). The terms commonly used with diagnostic tests are sensitivity, specificity, and accuracy, which statistically measure the performance of the test. <italic>Sensitivity</italic> indicates how well the test predicts one category and <italic>specificity</italic> measures how well the test predicts the other category, while <italic>accuracy</italic> is expected to measure how well the test predicts both categories.</p>
        <disp-formula>Sensitivity = TP/(TP+FN)</disp-formula>
        <disp-formula>Specificity = TN/(TN+FP)</disp-formula>
        <p>Other multiple tests with which to improve diagnostic decision making in different clinical situations have also been suggested. For example, Glas et al [<xref ref-type="bibr" rid="ref6">6</xref>] proposed the use of the DOR as a single indicator of diagnostic performance.</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>2×2 Contingency table.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="500"/>
            <col width="250"/>
            <col width="250"/>
            <thead>
              <tr valign="top">
                <td rowspan="2">Test</td>
                <td colspan="2">Reference test</td>
              </tr>
              <tr valign="top">
                <td>Target disorder</td>
                <td>No target disorder</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Positive</td>
                <td>TP<sup>a</sup></td>
                <td>FP<sup>b</sup></td>
              </tr>
              <tr valign="top">
                <td>Negative</td>
                <td>FN<sup>c</sup></td>
                <td>TN<sup>d</sup></td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table2fn1">
              <p><sup>a</sup>TP: true positive.</p>
            </fn>
            <fn id="table2fn2">
              <p><sup>b</sup>FP: false positive.</p>
            </fn>
            <fn id="table2fn3">
              <p><sup>c</sup>FN: false negative.</p>
            </fn>
            <fn id="table2fn4">
              <p><sup>d</sup>TN: true negative.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>The DOR is used to measure the discriminative power of a diagnostic test: the ratio of the odds of a positive test result among the diseased to the odds of a positive test result among the nondiseased. The DOR is not prevalence dependent, and may be easier to understand, as it is a familiar epidemiological measure. It can be expressed in terms of sensitivity and specificity.</p>
        <disp-formula>DOR = (TP/FN)/(FP/TN) = [sensitivity / (1–sensitivity)] / [(1–specificity) / specificity]</disp-formula>
        <p>The value of a DOR ranges from 0 to infinity. To calculate the DOR, the potential problems involving division by 0 are solved by adding 0.5 to the selected cells in the diagnostic 2×2 table.</p>
        <p>The further the odds ratio is from 1, the more likely it is that those with the disease are exposed when compared with those without the disease (risk factor). A value of 1 means that a test does not discriminate between patients with the disorder and those without it. Values lower than 1 suggest a reduced risk of disease associated with exposure (protection factor).</p>
        <p>CIs for range estimates can be conventionally calculated as shown in the next equation:</p>
        <disp-formula>
          <graphic xlink:href="medinform_v10i8e32319_fig19.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </disp-formula>
        <p>where <italic>Xhm</italic> is the Mantel-Haenszel chi-square and <italic>Z</italic>=1.96 if a confidence of 95% is employed.</p>
        <p>Li et al [<xref ref-type="bibr" rid="ref38">38</xref>] built an algorithm based on the following assumption: if adding an exposure to a rule does not produce a significant change in the DOR, then the rule should not be reported. The DOR between 2 rules is significantly different if their 95% CIs do not overlap.</p>
        <p>Several studies based on the nonoverlapping of the DOR have been performed. Toti et al [<xref ref-type="bibr" rid="ref39">39</xref>] discussed the differences in performance achieved while extracting rules with the different definitions of a nonexposed population, when no pruning criterion is used to filter redundant rules, or when a pruning criterion of redundant rules based on overlapping of 95% CI is added. They confirmed that mining with no pruning criterion produces a high number of redundant rules, thus proving the need for a process with which to eliminate them. Toti et al [<xref ref-type="bibr" rid="ref40">40</xref>] in another study explained that the traditional interest metrics of support and confidence need to be substituted for metrics that focus on risk variations caused by different exposures. They proposed 2 postprocessing pruning criteria: a rule is pruned if its 95% CI for the DOR crosses the value of 1 or if there is no overlapping of the 95% CI of the rule with all of its parents.</p>
      </sec>
      <sec>
        <title>Case Study</title>
        <p>A database contains 480 patient registries, which were recorded between 1992 and 2002. In this database, the temporal attributes that allow the monitoring and evaluation of the response to the treatment of patients are recorded once a day for 5 days. All attributes are continuous variables and represent the value accumulated during 24 hours. The registered variables are (1) total of managed liquids measured in cubic centimeters (cc) represented in the patterns as <italic>INC</italic>; (2) diuresis in cubic decimeters (dc) represented in the patterns as <italic>DIUR</italic>; (3) balance of fluids in cubic decimeters (dc) represented in the patterns as <italic>BAL</italic>; (4) pH; (5) bicarbonate in millimoles/liter (mmol/L) represented in the patterns as <italic>BIC</italic>; and (6) excess base in milliequivalents/liter (mEq/L) represented in the patterns as <italic>BE</italic>. Note that fluid balance is not the difference between incomings and diuresis, but is rather considered to be all the possible eliminations of fluids.</p>
        <p>We have removed from the database only those patients who died during the course of the study or those for whom it was not possible to estimate the duration of their hospital stay. After this cleansing, 465 patients remained, of whom 378 patients (81.3%) survived, 324 patients (69.7%) were male, and 201 patients (43.2%) had inhalation injuries. <xref ref-type="table" rid="table3">Table 3</xref> provides a summary of the static attributes of the database.</p>
        <table-wrap position="float" id="table3">
          <label>Table 3</label>
          <caption>
            <p>Attribute summary.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="340"/>
            <col width="160"/>
            <col width="170"/>
            <col width="170"/>
            <col width="160"/>
            <thead>
              <tr valign="top">
                <td>Attribute</td>
                <td>Minimum</td>
                <td>Maximum</td>
                <td>Median</td>
                <td>SD</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Age (years)</td>
                <td>9</td>
                <td>95</td>
                <td>46.42</td>
                <td>20.34</td>
              </tr>
              <tr valign="top">
                <td>Weight (kg)</td>
                <td>25</td>
                <td>120</td>
                <td>71.05</td>
                <td>10.77</td>
              </tr>
              <tr valign="top">
                <td>Length of stay (days)</td>
                <td>3</td>
                <td>162</td>
                <td>25.02</td>
                <td>24.24</td>
              </tr>
              <tr valign="top">
                <td>Total burn surface area (%)</td>
                <td>1</td>
                <td>90</td>
                <td>31.28</td>
                <td>20.16</td>
              </tr>
              <tr valign="top">
                <td>Deep burn surface area (%)</td>
                <td>0</td>
                <td>90</td>
                <td>17.01</td>
                <td>17.41</td>
              </tr>
              <tr valign="top">
                <td>Simplified Acute Physiology Score</td>
                <td>6</td>
                <td>58</td>
                <td>20.67</td>
                <td>9.49</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Experiments</title>
        <p>We carried out the experiments by following the 4-step knowledge discovery process described in our previous paper [<xref ref-type="bibr" rid="ref31">31</xref>]: (1) preprocessing, (2) mining, (3) pattern selection, and (4) classification.</p>
        <p>In the first step, the preprocessing was carried out by employing 2 different discretization methods for the continuous attributes. One method was attribute discretization performed by an expert. This method provided the patterns with greater interpretability, because they are expressed in clinical language. The other method is the unsupervised correlation preserving discretization (UCPD), because it provided the best classification in comparison to several automatic discretization algorithms [<xref ref-type="bibr" rid="ref41">41</xref>].</p>
        <p>In the second step, we used the FaSPIP algorithm [<xref ref-type="bibr" rid="ref33">33</xref>] to discover multivariate sequential patterns. We considered pattern supports ranging from 16% to 6% to find the greatest support that generates the smallest number of patterns with the best classification results. This, therefore, enabled us to obtain interesting patterns, ranging from a small number to thousands of them (<xref ref-type="table" rid="table4">Table 4</xref>).</p>
        <p>The best results were not produced with the lowest supports, which seems to imply that there is no overfitting.</p>
        <p>The third step consisted of reducing the number of patterns found to select only those that would be relevant for the classification. If the support used in the previous step is low, the number of frequent patterns increases acutely: the pattern explosion phenomenon is one important disadvantage of using patterns as predictors for classifiers.</p>
        <p>We decided to use a baseline experiment to compare it with our proposed methods. We therefore employed the frequency property (because it is frequently used to measure interestingness) to select discriminative patterns. To this end, we selected only JEPs that are not common in the subset of nonsurvivors and survivors, thus enabling us to remove common behavior or a patient’s evolution that is not discriminative.</p>
        <p>Finally, the fourth step consisted of building a classification model with the constraint that it had to be interpretable. We wished to obtain a model with a small number of patterns that would be easy for the physician to interpret. In this case, we used a rule learner and a decision tree.</p>
        <p>On the one hand, we used Repeated Incremental Pruning to Produce Error Reduction (RIPPER) as a rule learner. With this sequential covering algorithm, rules are learned one at a time, and each time a rule is learned, the tuples covered by the rule are removed. This process is repeated until there are no more training examples or if the quality of a rule obtained is below a user-specified threshold. JRIP (the implementation of RIPPER in WEKA) is one of the best classification algorithms to combine human readability and accuracy [<xref ref-type="bibr" rid="ref42">42</xref>].</p>
        <p>On the other hand, we chose the J48 decision tree, the implementation of the C4.5 algorithm in WEKA. This employs a greedy technique that is a variant of ID3, which determines the most predictive attribute in each step, and splits a node based on this attribute. Mohamed et al [<xref ref-type="bibr" rid="ref43">43</xref>] explained that J48 produces a high classification accuracy and a simple tree structure. Moreover, Jiménez et al [<xref ref-type="bibr" rid="ref19">19</xref>] showed that the J48 decision tree algorithm provides the simplest model using the ICBU data set, and thus it is easily interpretable by physicians.</p>
        <p>In all cases, we configured the classifiers identically, setting the minimum number of elements in each leaf to 2% and the minimal weight of rule instances within a split to 2%. The accuracy, sensitivity, specificity, and AUC were calculated using a 10-fold cross validation.</p>
        <table-wrap position="float" id="table4">
          <label>Table 4</label>
          <caption>
            <p>Number of interesting patterns selected after mining on the subset of survivors and on the set of nonsurvivors for UCPD<sup>a</sup> and expert discretization.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="90"/>
            <col width="140"/>
            <col width="80"/>
            <col width="90"/>
            <col width="90"/>
            <col width="80"/>
            <col width="80"/>
            <col width="80"/>
            <col width="80"/>
            <col width="60"/>
            <col width="100"/>
            <thead>
              <tr valign="top">
                <td rowspan="2" colspan="2">Discretization and support (%)</td>
                <td rowspan="2">Survival + death initial patterns</td>
                <td rowspan="2">Baseline JEPs<sup>b</sup></td>
                <td colspan="2">Experiment 1, DOR<sup>c</sup></td>
                <td colspan="2">Experiment 2, differential DOR</td>
                <td colspan="2">Experiment 3, nonoverlapping DOR</td>
                <td colspan="2">Experiment 4, differential + nonoverlapping DOR</td>
              </tr>
              <tr valign="top">
                <td>&#60;0.08, &#62;16</td>
                <td>&#60;0.04, &#62;32</td>
                <td>All</td>
                <td>Best</td>
                <td>All</td>
                <td>Best</td>
                <td>All</td>
                <td>Best</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="2">
                  <bold>Expert</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="3">
                  <break/>
                </td>
                <td>10</td>
                <td>46,041 + 83,015</td>
                <td>391</td>
                <td>2065</td>
                <td>750</td>
                <td>2795</td>
                <td>2359</td>
                <td>858</td>
                <td>746</td>
                <td>236</td>
                <td>198</td>
              </tr>
              <tr valign="top">
                <td>8</td>
                <td>88,084 + 241,866</td>
                <td>4931</td>
                <td>14,424</td>
                <td>5798</td>
                <td>10,655</td>
                <td>8781</td>
                <td>2195</td>
                <td>1856</td>
                <td>701</td>
                <td>504</td>
              </tr>
              <tr valign="top">
                <td>6</td>
                <td>224,952 + 492,504</td>
                <td>47,113</td>
                <td>51,352</td>
                <td>41,059</td>
                <td>32,406</td>
                <td>26,157</td>
                <td>4545</td>
                <td>3803</td>
                <td>1556</td>
                <td>1293</td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>UCPD</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="3">
                  <break/>
                </td>
                <td>16</td>
                <td>238,337 + 49,947</td>
                <td>2179</td>
                <td>14,158</td>
                <td>2766</td>
                <td>2401</td>
                <td>1990</td>
                <td>1529</td>
                <td>1415</td>
                <td>325</td>
                <td>272</td>
              </tr>
              <tr valign="top">
                <td>14</td>
                <td>396,238 + 68,654</td>
                <td>7556</td>
                <td>33,979</td>
                <td>7483</td>
                <td>4153</td>
                <td>3465</td>
                <td>2296</td>
                <td>2052</td>
                <td>487</td>
                <td>411</td>
              </tr>
              <tr valign="top">
                <td>12</td>
                <td>647,943 + 137,546</td>
                <td>22,940</td>
                <td>65,564</td>
                <td>16,272</td>
                <td>9907</td>
                <td>8173</td>
                <td>6418</td>
                <td>5228</td>
                <td>1397</td>
                <td>1212</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table4fn1">
              <p><sup>a</sup>UCPD: unsupervised correlation preserving discretization.</p>
            </fn>
            <fn id="table4fn2">
              <p><sup>b</sup>JEP: Jumping Emerging Pattern.</p>
            </fn>
            <fn id="table4fn3">
              <p><sup>c</sup>DOR: diagnostic odds ratio.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Ethics Approval</title>
        <p>The study was approved by the Ethics Committee of Hospital Universitario de Getafe (38/17, approved on 30/11/2017). This research study was conducted from data obtained for clinical purposes. Informed consent was not required.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Overview</title>
        <p>The results of the baseline experiment and the results of our 4 different proposals using the DOR are shown below. The number of patterns generated in the subset of survivors and in the set of nonsurvivors with different supports is shown in <xref ref-type="table" rid="table4">Table 4</xref>. We also studied the length of the patterns produced (<xref ref-type="table" rid="table5">Table 5</xref>). A short pattern is simpler and more general (it covers more patients). However, a long pattern is more specific (covers fewer patients) and is harder to understand. It is, therefore, more difficult to build a classifier with short patterns.</p>
        <p>In the discussion, we explore 3 aspects: classification performance, number and length of patterns selected, and classification interpretability.</p>
        <table-wrap position="float" id="table5">
          <label>Table 5</label>
          <caption>
            <p>Number (and percentage) of interesting patterns by length (from 2 to 10) for 8% expert discretization, selecting all the patterns where possible.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="110"/>
            <col width="120"/>
            <col width="160"/>
            <col width="170"/>
            <col width="130"/>
            <col width="120"/>
            <col width="190"/>
            <thead>
              <tr valign="top">
                <td>Pattern length</td>
                <td>Baseline JEPs<sup>a</sup> (n=4931)</td>
                <td>Experiment 1a, DOR<sup>b</sup> (&#60;0.08, &#62;16)<break/>(n=14,424)</td>
                <td>Experiment 1b, DOR (&#60;0.04, &#62;32) (n=5798)</td>
                <td>Experiment 2, differential DOR<break/>(n=10,655)</td>
                <td>Experiment 3, nonoverlapping DOR (n=2195)</td>
                <td>Experiment 4, differential + nonoverlapping DOR<break/>(n=701)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>2</td>
                <td>0 (0)</td>
                <td>5 (0.0)</td>
                <td>0 (0)</td>
                <td>289 (2.7)</td>
                <td>76 (3.5)</td>
                <td>39 (5.6)</td>
              </tr>
              <tr valign="top">
                <td>3</td>
                <td>41 (0.8)</td>
                <td>187 (1.3)</td>
                <td>49 (0.8)</td>
                <td>2063 (19.4)</td>
                <td>461 (21.0)</td>
                <td>198 (28.2)</td>
              </tr>
              <tr valign="top">
                <td>4</td>
                <td>542 (11.0)</td>
                <td>1610 (11.2)</td>
                <td>552 (9.5)</td>
                <td>3912 (36.7)</td>
                <td>857 (39.0)</td>
                <td>299 (42.7)</td>
              </tr>
              <tr valign="top">
                <td>5</td>
                <td>1377 (27.9)</td>
                <td>4176 (29.0)</td>
                <td>1545 (26.6)</td>
                <td>3004 (28.2)</td>
                <td>612 (27.9)</td>
                <td>140 (20.0)</td>
              </tr>
              <tr valign="top">
                <td>6</td>
                <td>1518 (30.8)</td>
                <td>4811 (33.4)</td>
                <td>1960 (33.8)</td>
                <td>1155 (10.8)</td>
                <td>175 (8.0)</td>
                <td>23 (3.3)</td>
              </tr>
              <tr valign="top">
                <td>7</td>
                <td>987 (20.0)</td>
                <td>2698 (18.7)</td>
                <td>1190 (20.5)</td>
                <td>212 (2.0)</td>
                <td>14 (0.6)</td>
                <td>2 (0.3)</td>
              </tr>
              <tr valign="top">
                <td>8</td>
                <td>372 (7.5)</td>
                <td>785 (5.4)</td>
                <td>407 (7.0)</td>
                <td>20 (0.2)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
              </tr>
              <tr valign="top">
                <td>9</td>
                <td>84 (1.7)</td>
                <td>139 (1.0)</td>
                <td>85 (1.5)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
              </tr>
              <tr valign="top">
                <td>10</td>
                <td>10 (0.2)</td>
                <td>13 (0.1)</td>
                <td>10 (0.2)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table5fn1">
              <p><sup>a</sup>JEP: Jumping Emerging Pattern.</p>
            </fn>
            <fn id="table5fn2">
              <p><sup>b</sup>DOR: diagnostic odds ratio.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Baseline Experiment: Using JEPs</title>
        <p>In the baseline experiment, we searched for discriminative patterns, one of the most important techniques in data mining [<xref ref-type="bibr" rid="ref44">44</xref>], where the patterns are pruned using only support properties. We selected JEPs, signifying that we maintained patterns found only in the survivors and patterns that occurred exclusively in the nonsurvivors. In a previous paper [<xref ref-type="bibr" rid="ref31">31</xref>], we verified that this type of emerging pattern produces the best classification results. Furthermore, in this way there is no need to set a threshold that could lead to different results.</p>
        <p><xref ref-type="table" rid="table6">Table 6</xref> depicts the results of the experiments carried out using 2 discretization algorithms and by varying the pattern support.</p>
        <table-wrap position="float" id="table6">
          <label>Table 6</label>
          <caption>
            <p>Results of the baseline experiment with JEPs.<sup>a,b</sup></p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="30"/>
            <col width="210"/>
            <col width="110"/>
            <col width="110"/>
            <col width="140"/>
            <col width="110"/>
            <col width="110"/>
            <col width="90"/>
            <col width="60"/>
            <thead>
              <tr valign="top">
                <td colspan="3">Classifier, discretization, and pattern support (%)</td>
                <td>Number of patterns</td>
                <td>Total length (items)</td>
                <td>Average length (items/pattern)</td>
                <td>Sensitivity (%)</td>
                <td>Specificity (%)</td>
                <td>Accuracy (%)</td>
                <td>AUC<sup>c</sup></td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="3">
                  <bold>J48</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>Expert</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  10
                </td>
                <td>7</td>
                <td>33</td>
                <td>4.71</td>
                <td>100.00</td>
                <td>43.68</td>
                <td>89.46</td>
                <td>0.709</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  8
                </td>
                <td>
                  <italic>17</italic>
                </td>
                <td>
                  <italic>84</italic>
                </td>
                <td>
                  <italic>4.94</italic>
                </td>
                <td>
                  <italic>100.00</italic>
                </td>
                <td>
                  <italic>56.32</italic>
                </td>
                <td>
                  <italic>91.83</italic>
                </td>
                <td>
                  <italic>0.782</italic>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  6
                </td>
                <td>
                  16
                </td>
                <td>
                 80
                </td>
                <td>
                 5
                </td>
                <td>
                  100.00
                </td>
                <td>
                  44.83
                </td>
                <td>
                 89.68
                </td>
                <td>
                  0.720
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>UCPD<sup>d</sup></bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  16
                </td>
                <td>
                  8
                </td>
                <td>
                  29
                </td>
                <td>
                 3.63
                </td>
                <td>
                 100.00
                </td>
                <td>
                 52.87
                </td>
                <td>
                91.18
                </td>
                <td>
                 0.763
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  14
                </td>
                <td>
                  <italic>10</italic>
                </td>
                <td>
                  <italic>37</italic>
                </td>
                <td>
                  <italic>3.7</italic>
                </td>
                <td>
                  <italic>100.00</italic>
                </td>
                <td>
                  <italic>66.67</italic>
                </td>
                <td>
                  <italic>93.76</italic>
                </td>
                <td>
                  <italic>0.853</italic>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  12
                </td>
                <td>
                 12
                </td>
                <td>
                 48
                </td>
                <td>
                4
                </td>
                <td>
                 100.00
                </td>
                <td>
                59.77
                </td>
                <td>
                  92.47
                </td>
                <td>
                0.796
                </td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>JRIP</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>Expert</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  10
                </td>
                <td>8</td>
                <td>37</td>
                <td>4.63</td>
                <td>100.00</td>
                <td>40.23</td>
                <td>88.82</td>
                <td>0.704</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  8
                </td>
                <td>
                  <italic>15</italic>
                </td>
                <td>
                  <italic>79</italic>
                </td>
                <td>
                  <italic>5.27</italic>
                </td>
                <td>
                  <italic>100.00</italic>
                </td>
                <td>
                  <italic>58.62</italic>
                </td>
                <td>
                  <italic>92.26</italic>
                </td>
                <td>
                  <italic>0.777</italic>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  6
                </td>
                <td>
                  18
                </td>
                <td>
                  87
                </td>
                <td>
                  4.83
                </td>
                <td>
                  100.00
                </td>
                <td>
                  44.83
                </td>
                <td>
                  89.68
                </td>
                <td>
                  0.729
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>UCPD</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  16
                </td>
                <td>
                  7
                </td>
                <td>
                  34
                </td>
                <td>
                  4.86
                </td>
                <td>
                 100.00
                </td>
                <td>
                  47.13
                </td>
                <td>
                  90.11
                </td>
                <td>
                 0.711
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  14
                </td>
                <td>
                  <italic>10</italic>
                </td>
                <td>
                  <italic>35</italic>
                </td>
                <td>
                  <italic>3.5</italic>
                </td>
                <td>
                  <italic>100.00</italic>
                </td>
                <td>
                  <italic>73.56</italic>
                </td>
                <td>
                  <italic>95.05</italic>
                </td>
                <td>
                  <italic>0.866</italic>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  12
                </td>
                <td>
                 12
                </td>
                <td>
                 51
                </td>
                <td>
                  4.25
                </td>
                <td>
                  100.00
                </td>
                <td>
                  62.07
                </td>
                <td>
                 92.90
                </td>
                <td>
                 0.833
                </td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table6fn1">
              <p><sup>a</sup>JEP: Jumping Emerging Pattern.</p>
            </fn>
            <fn id="table6fn2">
              <p><sup>b</sup>Highest specificity is in italics.</p>
            </fn>
            <fn id="table6fn3">
              <p><sup>c</sup>AUC: area under the receiver operating characteristic curve.</p>
            </fn>
            <fn id="table6fn4">
              <p><sup>d</sup>UCPD: unsupervised correlation preserving discretization.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>As will be noted, the JEPs make it possible to achieve a sensitivity of 100%, but the specificity has lower values. This is due to the fact that the data set is imbalanced with a majority of survivors, and the patterns cover only those patients that will survive or those that will die. It is necessary to achieve a higher specificity to predict the nonsurvivors, so the highest specificity is in italics in <xref ref-type="table" rid="table6">Table 6</xref> as a baseline best result.</p>
        <p>The expert discretization is preferred by clinicians, because it is based principally on reference range values. But note that it is possible to improve the results by using an automatic discretization, such as UCPD (see [<xref ref-type="bibr" rid="ref41">41</xref>]).</p>
        <p>When using expert discretization, the highest specificity (58.62%) is obtained using the JRIP classifier with 8% support.</p>
        <p>This classifier requires 15 patterns, with a total length of 79 items, with the average length per pattern being 5.27 items. As an example, we show a pattern found in the subset of nonsurvivors. For each variable, the subindex <italic>i</italic> marks the <italic>i</italic>th discretization interval, where <italic>i</italic>=0 is the lowest interval:</p>
        <disp-formula>&#60; <italic>BAL</italic><sub>4</sub> &#60; <italic>BIC</italic><sub>1</sub> &#60; <italic>DIUR</italic><sub>2</sub> &#60; <italic>BE</italic><sub>0</sub> (10 nonsurvivors, 0 survivors)</disp-formula>
        <p>There is also an interesting pattern that appears in all the 5 experiments for the subset of nonsurvivors:</p>
        <disp-formula>&#60; <italic>DIUR</italic><sub>3</sub> &#60; <italic>INC</italic><sub>0</sub> &#60; <italic>INC</italic><sub>0</sub> &#60; <italic>DIUR</italic><sub>3</sub> (10 nonsurvivors, 0 survivors)</disp-formula>
        <p>It would, therefore, be possible to interpret this pattern as “a patient will die if his/her diuresis is very high on one day, and during the next 2 days there is a low income with a very high diuresis the following day.”</p>
      </sec>
      <sec>
        <title>Experiment 1: Using the DOR</title>
        <p>In this experiment, we calculated the DOR for each pattern as shown in the “Methods” section. In clinical language, a DOR&#62;1 implies that the exposure to the pattern is a risk factor. Conversely, a DOR&#60;1 implies that the pattern is a protection factor and selecting a DOR threshold with a very low value therefore suggests a reduced risk of disease associated with exposure. A value of DOR=1 signifies that the pattern does not discriminate between patients with the disorder and those without it.</p>
        <p>The selection of patterns with either a high value or a low value for the DOR will therefore generate discriminative patterns. It is necessary to establish a manual threshold for the value of the DOR to choose the patterns. We have carried out 2 experiments. In the first experiment (1a), we have selected the patterns with a DOR value higher than 16 or lower than 0.08, and in the second experiment (1b), we have selected more stringent values, which were double or half the DOR value, that is, with a DOR value higher than 32 or lower than 0.04. This allowed us to reduce the number of patterns (<xref ref-type="table" rid="table4">Table 4</xref>) and we obtained a number of patterns in Experiment 1b that were similar to those obtained in the previous experiment. In the more stringent configuration, the length of the selected patterns was almost 6 (<xref ref-type="table" rid="table5">Table 5</xref>), which was again similar to the baseline experiment.</p>
        <p><xref ref-type="table" rid="table7">Tables 7</xref> and <xref ref-type="table" rid="table8">8</xref> show the classification performance of the 2 experiments using expert discretization and UCPD methods with different pattern supports. Expert discretization makes it possible to attain better results than when using JEPs in the previous experiment (<xref ref-type="table" rid="table6">Table 6</xref>), and worse results than when using UCPD.</p>
        <table-wrap position="float" id="table7">
          <label>Table 7</label>
          <caption>
            <p>Results of Experiment 1a using the DOR<sup>a</sup> (&#60;0.08, &#62;16).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="30"/>
            <col width="140"/>
            <col width="160"/>
            <col width="120"/>
            <col width="140"/>
            <col width="110"/>
            <col width="110"/>
            <col width="100"/>
            <col width="60"/>
            <thead>
              <tr valign="top">
                <td colspan="3">Classifier, discretization, and pattern support (%)</td>
                <td>Number of patterns</td>
                <td>Total length (items)</td>
                <td>Average length (items/pattern)</td>
                <td>Sensitivity (%)</td>
                <td>Specificity (%)</td>
                <td>Accuracy (%)</td>
                <td>AUC<sup>b</sup></td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="3">
                  <bold>J48</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>Expert</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="7">
                  <break/>
                </td>
                <td rowspan="3">
                  <break/>
                </td>
                <td>10</td>
                <td>13</td>
                <td>67</td>
                <td>5.15</td>
                <td>90.21</td>
                <td>62.07</td>
                <td>84.95</td>
                <td>0.766</td>
              </tr>
              <tr valign="top">
                <td>
                  8
                </td>
                <td>
                  18
                </td>
                <td>
                  89
                </td>
                <td>
                  4.94
                </td>
                <td>
                  88.62
                </td>
                <td>
                 58.62
                </td>
                <td>
                 83.01
                </td>
                <td>
                  0.759
                </td>
              </tr>
              <tr valign="top">
                <td>
                  6
                </td>
                <td>
               16
                </td>
                <td>
                 80
                </td>
                <td>
                5
                </td>
                <td>
                 91.80
                </td>
                <td>
                  47.13
                </td>
                <td>
                 83.44
                </td>
                <td>
                  0.702
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>UCPD<sup>c</sup></bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="3">
                  <break/>
                </td>
                <td>
                  16
                </td>
                <td>
                  8
                </td>
                <td>
                  29
                </td>
                <td>
                  3.62
                </td>
                <td>
                  100.00
                </td>
                <td>
                  52.87
                </td>
                <td>
                  91.18
                </td>
                <td>
                 0.763
                </td>
              </tr>
              <tr valign="top">
                <td>
                 14
                </td>
                <td>
                 11
                </td>
                <td>
                  43
                </td>
                <td>
                 3.91
                </td>
                <td>100.00</td>
                <td>62.07</td>
                <td>92.90</td>
                <td>0.787</td>
              </tr>
              <tr valign="top">
                <td>
                  12
                </td>
                <td>
                  12
                </td>
                <td>
                  48
                </td>
                <td>
                  4
                </td>
                <td>
                  100.00
                </td>
                <td>
                  59.77
                </td>
                <td>
                  92.47
                </td>
                <td>
                  0.796
                </td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>JRIP</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>Expert</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="7">
                  <break/>
                </td>
                <td rowspan="3">
                  <break/>
                </td>
                <td>10</td>
                <td>10</td>
                <td>46</td>
                <td>4.6</td>
                <td>91.27</td>
                <td>55.17</td>
                <td>84.52</td>
                <td>0.716</td>
              </tr>
              <tr valign="top">
                <td>
                8
                </td>
                <td>
                  12
                </td>
                <td>
                  58
                </td>
                <td>
                 4.83
                </td>
                <td>
                 93.12
                </td>
                <td>
                 54.02
                </td>
                <td>
                 85.81
                </td>
                <td>
                 0.720
                </td>
              </tr>
              <tr valign="top">
                <td>
                  6
                </td>
                <td>
                 14
                </td>
                <td>
                 67
                </td>
                <td>
                  4.79
                </td>
                <td>
                 94.44
                </td>
                <td>
                 52.87
                </td>
                <td>
                  86.67
                </td>
                <td>
                 0.706
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>UCPD</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="3">
                  <break/>
                </td>
                <td>
                  16
                </td>
                <td>
                 8
                </td>
                <td>
                  33
                </td>
                <td>
                  4.13
                </td>
                <td>
                  100.00
                </td>
                <td>
                  41.38
                </td>
                <td>
                  89.03
                </td>
                <td>
                  0.716
                </td>
              </tr>
              <tr valign="top">
                <td>
                 14
                </td>
                <td>
                  12
                </td>
                <td>
                  47
                </td>
                <td>
                  3.92
                </td>
                <td>100.00</td>
                <td>62.07</td>
                <td>92.90</td>
                <td>0.828</td>
              </tr>
              <tr valign="top">
                <td>
                  12
                </td>
                <td>
                  12
                </td>
                <td>
                  46
                </td>
                <td>
                  3.83
                </td>
                <td>
                  100.00
                </td>
                <td>
                  59.77
                </td>
                <td>
                  92.47
                </td>
                <td>
                  0.816
                </td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table7fn1">
              <p><sup>a</sup>DOR: diagnostic odds ratio.</p>
            </fn>
            <fn id="table7fn2">
              <p><sup>b</sup>AUC: area under the receiver operating characteristic curve.</p>
            </fn>
            <fn id="table7fn3">
              <p><sup>c</sup>UCPD: unsupervised correlation preserving discretization.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <table-wrap position="float" id="table8">
          <label>Table 8</label>
          <caption>
            <p>Results of Experiment 1b using the DOR<sup>a</sup> (&#60;0.04, &#62;32).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="30"/>
            <col width="140"/>
            <col width="160"/>
            <col width="120"/>
            <col width="140"/>
            <col width="110"/>
            <col width="110"/>
            <col width="100"/>
            <col width="60"/>
            <thead>
              <tr valign="top">
                <td colspan="3">Classifier, discretization, and pattern support (%)</td>
                <td>Number of patterns</td>
                <td>Total length (items)</td>
                <td>Average length (items/pattern)</td>
                <td>Sensitivity (%)</td>
                <td>Specificity (%)</td>
                <td>Accuracy (%)</td>
                <td>AUC<sup>b</sup></td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="3">
                  <bold>J48</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>Expert</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="7">
                  <break/>
                </td>
                <td rowspan="3">
                  <break/>
                </td>
                <td>10</td>
                <td>10</td>
                <td>49</td>
                <td>4.9</td>
                <td>93.65</td>
                <td>50.57</td>
                <td>85.59</td>
                <td>0.710</td>
              </tr>
              <tr valign="top">
                <td>
                  8
                </td>
                <td>
                  17
                </td>
                <td>
                  84
                </td>
                <td>
                  4.94
                </td>
                <td>94.18</td>
                <td>55.17</td>
                <td>86.88</td>
                <td>0.767</td>
              </tr>
              <tr valign="top">
                <td>
                  6
                </td>
                <td>
                  16
                </td>
                <td>
                  80
                </td>
                <td>
                  5
                </td>
                <td>
                  95.50
                </td>
                <td>
                  37.93
                </td>
                <td>
                  84.73
                </td>
                <td>
                  0.656
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>UCPD<sup>c</sup></bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="3">
                  <break/>
                </td>
                <td>
                  16
                </td>
                <td>
                  8
                </td>
                <td>
                  29
                </td>
                <td>
                  3.62
                </td>
                <td>
                  100.00
                </td>
                <td>
                  52.87
                </td>
                <td>
                  91.18
                </td>
                <td>
                  0.763
                </td>
              </tr>
              <tr valign="top">
                <td>
                  14
                </td>
                <td>
                  11
                </td>
                <td>
                  43
                </td>
                <td>
                  3.91
                </td>
                <td>100.00</td>
                <td>62.07</td>
                <td>92.90</td>
                <td>0.787</td>
              </tr>
              <tr valign="top">
                <td>
                  12
                </td>
                <td>
                  12
                </td>
                <td>
                  48
                </td>
                <td>
                  4
                </td>
                <td>
                  100.00
                </td>
                <td>
                  59.77
                </td>
                <td>
                  92.47
                </td>
                <td>
                  0.796
                </td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>JRIP</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>Expert</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="7">
                  <break/>
                </td>
                <td rowspan="3">
                  <break/>
                </td>
                <td>10</td>
                <td>11</td>
                <td>50</td>
                <td>4.55</td>
                <td>97.09</td>
                <td>44.83</td>
                <td>87.31</td>
                <td>0.704</td>
              </tr>
              <tr valign="top">
                <td>
                  8
                </td>
                <td>
                 14
                </td>
                <td>
                  67
                </td>
                <td>
                  4.79
                </td>
                <td>95.50</td>
                <td>62.07</td>
                <td>89.25</td>
                <td>0.801</td>
              </tr>
              <tr valign="top">
                <td>
                  6
                </td>
                <td>
                  16
                </td>
                <td>
                  87
                </td>
                <td>
                  5.44
                </td>
                <td>
                  98.15
                </td>
                <td>
                  48.28
                </td>
                <td>
                  88.82
                </td>
                <td>
                  0.715
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>UCPD</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="3">
                  <break/>
                </td>
                <td>
                  16
                </td>
                <td>
                  7
                </td>
                <td>
                  26
                </td>
                <td>
                  3.71
                </td>
                <td>
                  100.00
                </td>
                <td>
                  47.13
                </td>
                <td>
                  90.11
                </td>
                <td>
                  0.727
                </td>
              </tr>
              <tr valign="top">
                <td>
                  14
                </td>
                <td>
                  11
                </td>
                <td>
                  45
                </td>
                <td>
                  4.09
                </td>
                <td>100.00</td>
                <td>60.92</td>
                <td>92.69</td>
                <td>0.792</td>
              </tr>
              <tr valign="top">
                <td>
                  12
                </td>
                <td>
                  14
                </td>
                <td>
                  55
                </td>
                <td>
                  3.93
                </td>
                <td>
                  100.00
                </td>
                <td>
                  60.92
                </td>
                <td>
                  92.69
                </td>
                <td>
                  0.822
                </td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table8fn1">
              <p><sup>a</sup>DOR: diagnostic odds ratio.</p>
            </fn>
            <fn id="table8fn2">
              <p><sup>b</sup>AUC: area under the receiver operating characteristic curve.</p>
            </fn>
            <fn id="table8fn3">
              <p><sup>c</sup>UCPD: unsupervised correlation preserving discretization.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>If we choose expert discretization, with a JRIP classifier and the highest values of the DOR (<xref ref-type="table" rid="table8">Table 8</xref>), we obtain a higher specificity than with JEPs (62.07%), but a lower sensitivity (95.50%). This can be explained as follows: if we look at one of the 14 patterns used in that classifier, we can find an example of a short pattern with only 3 items:</p>
        <disp-formula><italic>BIC</italic><sub>1</sub> &#60; <italic>BAL</italic><sub>4</sub> &#60; <italic>PH</italic><sub>1</sub> (72.30 DOR) (14 nonsurvivors, 1 survivor)</disp-formula>
        <p>This pattern, with a DOR value of 72.30, classifies a group of patients that will die, although we know that there will be minimal errors (1 patient survives).</p>
        <p>We selected the pattern <italic>DIUR</italic><sub>3</sub> &#60; <italic>INC</italic><sub>0</sub> &#60; <italic>INC</italic><sub>0</sub> &#60; <italic>DIUR</italic><sub>3</sub> in this experiment because it has a DOR value of 98.05, and it is necessary to recall that all the patients in this pattern will die (10 deaths, 0 survivors). This kind of JEP therefore produces a good specificity, and consequently 100% sensitivity (there are no classification errors).</p>
      </sec>
      <sec>
        <title>Experiment 2: Using the Differential DOR Between a Pattern and Its Extensions</title>
        <p>A sequential pattern <italic>p<sub>i</sub></italic>, of a specific length (<italic>l</italic>), at a point in time (<italic>t</italic>), has a DOR value <italic>DOR</italic>(<italic>p<sub>i</sub></italic>). In every extension of this pattern (<italic>l</italic>+1), which could be an S-extension (in the next time, <italic>t</italic>+1) or an I-extension (in the same time, <italic>t</italic>), there may be several (<italic>n</italic>) patterns (<italic>p<sub>i</sub></italic><sub>1</sub>, <italic>p<sub>i</sub></italic><sub>2</sub>, ..., <italic>p<sub>in</sub></italic>) that are children of super-pattern <italic>p<sub>i</sub></italic> with different DOR values, <inline-graphic xlink:href="medinform_v10i8e32319_fig20.png" xlink:type="simple" mimetype="image"/>. In this experiment, we chose only the patterns that had a difference in DOR value between the super-pattern and its extensions higher than a threshold <italic>γ</italic>, that is <italic>DOR</italic>(<italic>p<sub>i</sub></italic>) – <italic>DOR</italic>(<italic>p<sub>ij</sub></italic>) &#62; <italic>γ</italic>.</p>
        <p>For a better interpretation of the DOR, we calculated the risk factor probability <italic>R</italic>(<italic>p<sub>i</sub></italic>) and the protection factor probability <italic>P</italic>(<italic>p<sub>i</sub></italic>) as shown in the next equations:</p>
        <disp-formula><italic>R</italic>(<italic>p<sub>i</sub></italic>) = <italic>DOR</italic>(<italic>p<sub>i</sub></italic>)/[<italic>DOR</italic>(<italic>p<sub>i</sub></italic>) + 1]</disp-formula>
        <disp-formula><italic>P</italic>(<italic>p<sub>i</sub></italic>) = 1 – <italic>R</italic>(<italic>p<sub>i</sub></italic>)</disp-formula>
        <p>In our experiment, we therefore selected the patterns that satisfied either of 2 conditions: (1) the difference in the risk factor probability <italic>R</italic>(<italic>p<sub>i</sub></italic>) was greater than 25% or (2) the difference in the protection factor probability <italic>P</italic>(<italic>p<sub>i</sub></italic>) was greater than 30%. We chose a lower threshold value for <italic>R</italic>(<italic>p<sub>i</sub></italic>) because we wished to obtain a higher specificity by having more patterns that were representative of nonsurvivors. In this experiment we obtained high-quality patterns that captured great changes in the evolution of the patients.</p>
        <p>We additionally used 2 alternative strategies to select patterns: it is possible to maintain all the extensions with a difference in the DOR value that is higher than a threshold or to explore the extensions with a beam search, in which case we select only the most promising extension with the highest DOR difference among all extensions. <xref ref-type="table" rid="table9">Tables 9</xref> and <xref ref-type="table" rid="table10">10</xref> show the results attained using both strategies.</p>
        <p>With regard to the number of patterns selected (<xref ref-type="table" rid="table4">Table 4</xref>), when we chose the best extension, we reduced the total number of patterns by less than one-third because the majority of the patterns have only 1 or 2 extensions.</p>
        <p>If we study the length of the patterns (<xref ref-type="table" rid="table5">Table 5</xref>), in this experiment (and in those that follow) the majority of the patterns have a length of around 4, and it is now possible to find more patterns with a shorter length. Note that the distribution of patterns by length has changed. We currently have more general patterns that are shorter. This produces worse classification results when we use expert discretization with a JRIP classifier. It is well known that expert discretization usually performs worse because it is not based on statistical or information-theoretic criteria specifically designed for classification purposes. This also occurs in almost all of the following experiments.</p>
        <p>However, the results obtained with UCPD are similar, and even with the JRIP classification and beam search, we need the lowest number of items and patterns from all the experiments: only 5 patterns with a total length of 20 items are required to attain 56.32% specificity.</p>
        <table-wrap position="float" id="table9">
          <label>Table 9</label>
          <caption>
            <p>Results of Experiment 2a using the differential DOR<sup>a</sup> (keeping all pattern extensions).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="30"/>
            <col width="140"/>
            <col width="160"/>
            <col width="120"/>
            <col width="140"/>
            <col width="110"/>
            <col width="110"/>
            <col width="100"/>
            <col width="60"/>
            <thead>
              <tr valign="top">
                <td colspan="3">Classifier, discretization, and pattern support (%)</td>
                <td>Number of patterns</td>
                <td>Total length (items)</td>
                <td>Average length (items/pattern)</td>
                <td>Sensitivity (%)</td>
                <td>Specificity (%)</td>
                <td>Accuracy (%)</td>
                <td>AUC<sup>b</sup></td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="3">
                  <bold>J48</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>Expert</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="7">
                  <break/>
                </td>
                <td rowspan="3">
                  <break/>
                </td>
                <td>10</td>
                <td>28</td>
                <td>100</td>
                <td>3.57</td>
                <td>89.42</td>
                <td>49.43</td>
                <td>81.94</td>
                <td>0.662</td>
              </tr>
              <tr valign="top">
                <td>
                  8
                </td>
                <td>
                  21
                </td>
                <td>
                  89
                </td>
                <td>
                  4.24
                </td>
                <td>86.51</td>
                <td>62.07</td>
                <td>81.94</td>
                <td>0.773</td>
              </tr>
              <tr valign="top">
                <td>
                  6
                </td>
                <td>
                  18
                </td>
                <td>
                  84
                </td>
                <td>
                  4.67
                </td>
                <td>
                  96.30
                </td>
                <td>
                  44.83
                </td>
                <td>
                  86.67
                </td>
                <td>
                  0.694
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>UCPD<sup>c</sup></bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="3">
                  <break/>
                </td>
                <td>
                  16
                </td>
                <td>
                  21
                </td>
                <td>
                  81
                </td>
                <td>
                  3.86
                </td>
                <td>
                  93.65
                </td>
                <td>
                  49.43
                </td>
                <td>
                  85.38
                </td>
                <td>
                  0.677
                </td>
              </tr>
              <tr valign="top">
                <td>
                  14
                </td>
                <td>
                  15
                </td>
                <td>
                  56
                </td>
                <td>
                  3.73
                </td>
                <td>
                  94.97
                </td>
                <td>
                  56.32
                </td>
                <td>
                  87.74
                </td>
                <td>
                  0.759
                </td>
              </tr>
              <tr valign="top">
                <td>
                  12
                </td>
                <td>
                  12
                </td>
                <td>
                  52
                </td>
                <td>
                  4.33
                </td>
                <td>100.00</td>
                <td>58.62</td>
                <td>92.26</td>
                <td>0.788</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>JRIP</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>Expert</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="7">
                  <break/>
                </td>
                <td rowspan="3">
                  <break/>
                </td>
                <td>10</td>
                <td>4</td>
                <td>13</td>
                <td>3.25</td>
                <td>90.74</td>
                <td>31.03</td>
                <td>79.57</td>
                <td>0.620</td>
              </tr>
              <tr valign="top">
                <td>
                  8
                </td>
                <td>
                  8
                </td>
                <td>
                  25
                </td>
                <td>
                  3.13
                </td>
                <td>
                  86.77
                </td>
                <td>
                  29.89
                </td>
                <td>
                  76.13
                </td>
                <td>
                 0.600
                </td>
              </tr>
              <tr valign="top">
                <td>
                  6
                </td>
                <td>
                 3
                </td>
                <td>
                  7
                </td>
                <td>
                  2.33
                </td>
                <td>
                  89.68
                </td>
                <td>
                  29.89
                </td>
                <td>
                  78.49
                </td>
                <td>
                  0.594
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>UCPD</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="3">
                  <break/>
                </td>
                <td>
                  16
                </td>
                <td>
                  10
                </td>
                <td>
                 37
                </td>
                <td>
                  3.70
                </td>
                <td>
                  92.86
                </td>
                <td>
                  24.14
                </td>
                <td>
                  80.00
                </td>
                <td>
                  0.594
                </td>
              </tr>
              <tr valign="top">
                <td>
                  14
                </td>
                <td>
                 11
                </td>
                <td>
                 41
                </td>
                <td>
                  3.73
                </td>
                <td>
                  94.18
                </td>
                <td>
                 33.33
                </td>
                <td>
                 82.80
                </td>
                <td>
                  0.674
                </td>
              </tr>
              <tr valign="top">
                <td>
                 12
                </td>
                <td>
                  8
                </td>
                <td>
                  26
                </td>
                <td>3.25</td>
                <td>96.03</td>
                <td>62.07</td>
                <td>89.68</td>
                <td>0.831</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table9fn1">
              <p><sup>a</sup>DOR: diagnostic odds ratio.</p>
            </fn>
            <fn id="table9fn2">
              <p><sup>b</sup>AUC: area under the receiver operating characteristic curve.</p>
            </fn>
            <fn id="table9fn3">
              <p><sup>c</sup>UCPD: unsupervised correlation preserving discretization.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <table-wrap position="float" id="table10">
          <label>Table 10</label>
          <caption>
            <p>Results of Experiment 2b using the differential DOR<sup>a</sup> (using beam search for best pattern extension).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="30"/>
            <col width="140"/>
            <col width="160"/>
            <col width="120"/>
            <col width="140"/>
            <col width="110"/>
            <col width="110"/>
            <col width="100"/>
            <col width="60"/>
            <thead>
              <tr valign="top">
                <td colspan="3">Classifier, discretization, and pattern support (%)</td>
                <td>Number of patterns</td>
                <td>Total length (items)</td>
                <td>Average length (items/pattern)</td>
                <td>Sensitivity (%)</td>
                <td>Specificity (%)</td>
                <td>Accuracy (%)</td>
                <td>AUC<sup>b</sup></td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="3">
                  <bold>J48</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>Expert</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="7">
                  <break/>
                </td>
                <td rowspan="3">
                  <break/>
                </td>
                <td>10</td>
                <td>20</td>
                <td>73</td>
                <td>3.65</td>
                <td>89.15</td>
                <td>44.83</td>
                <td>80.86</td>
                <td>0.642</td>
              </tr>
              <tr valign="top">
                <td>
                  8
                </td>
                <td>
                  21
                </td>
                <td>
                  88
                </td>
                <td>4.19</td>
                <td>87.57</td>
                <td>62.07</td>
                <td>82.80</td>
                <td>0.783</td>
              </tr>
              <tr valign="top">
                <td>
                  6
                </td>
                <td>
                  18
                </td>
                <td>
                  84
                </td>
                <td>
                  4.67
                </td>
                <td>
                  97.35
                </td>
                <td>
                  43.68
                </td>
                <td>
                  87.31
                </td>
                <td>
                  0.710
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>UCPD<sup>c</sup></bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="3">
                  <break/>
                </td>
                <td>
                 16
                </td>
                <td>
                  21
                </td>
                <td>
                  81
                </td>
                <td>
                  3.86
                </td>
                <td>
                  93.65
                </td>
                <td>
                  49.43
                </td>
                <td>
                  85.38
                </td>
                <td>
                  0.675
                </td>
              </tr>
              <tr valign="top">
                <td>
                  14
                </td>
                <td>
                  15
                </td>
                <td>
                  56
                </td>
                <td>
                  3.73
                </td>
                <td>
                  94.71
                </td>
                <td>
                  56.32
                </td>
                <td>
                  87.53
                </td>
                <td>
                  0.760
                </td>
              </tr>
              <tr valign="top">
                <td>
                  12
                </td>
                <td>
                  12
                </td>
                <td>
                  52
                </td>
                <td>
                  4.33
                </td>
                <td>100.00</td>
                <td>57.47</td>
                <td>92.04</td>
                <td>0.764</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>JRIP</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>Expert</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="7">
                  <break/>
                </td>
                <td rowspan="3">
                  <break/>
                </td>
                <td>
                  10
                </td>
                <td>
                  18
                </td>
                <td>
                  59
                </td>
                <td>
                  3.28
                </td>
                <td>
                  89.15
                </td>
                <td>
                  27.59
                </td>
                <td>
                  77.63
                </td>
                <td>
                  0.582
                </td>
              </tr>
              <tr valign="top">
                <td>
                  8
                </td>
                <td>
                  5
                </td>
                <td>
                  17
                </td>
                <td>
                  3.4
                </td>
                <td>
                  90.48
                </td>
                <td>
                  21.84
                </td>
                <td>
                  77.63
                </td>
                <td>
                  0.569
                </td>
              </tr>
              <tr valign="top">
                <td>
                  6
                </td>
                <td>
                  8
                </td>
                <td>
                  29
                </td>
                <td>
                  3.62
                </td>
                <td>
                  91.53
                </td>
                <td>
                  31.03
                </td>
                <td>
                  80.22
                </td>
                <td>
                  0.623
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>UCPD</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="3">
                  <break/>
                </td>
                <td>
                  16
                </td>
                <td>
                  9
                </td>
                <td>
                  31
                </td>
                <td>
                  3.44
                </td>
                <td>
                  91.01
                </td>
                <td>
                  28.74
                </td>
                <td>
                  79.35
                </td>
                <td>
                  0.618
                </td>
              </tr>
              <tr valign="top">
                <td>
                  14
                </td>
                <td>
                  19
                </td>
                <td>
                  71
                </td>
                <td>
                  3.74
                </td>
                <td>
                  94.18
                </td>
                <td>
                  34.48
                </td>
                <td>
                  83.01
                </td>
                <td>
                  0.683
                </td>
              </tr>
              <tr valign="top">
                <td>
                  12
                </td>
                <td>5</td>
                <td>20</td>
                <td>
                  4
                </td>
                <td>97.09</td>
                <td>56.32</td>
                <td>89.46</td>
                <td>0.767</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table10fn1">
              <p><sup>a</sup>DOR: diagnostic odds ratio.</p>
            </fn>
            <fn id="table10fn2">
              <p><sup>b</sup>AUC: area under the receiver operating characteristic curve.</p>
            </fn>
            <fn id="table10fn3">
              <p><sup>c</sup>UCPD: unsupervised correlation preserving discretization.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>The J48 classification tree used to classify with expert discretization and 8% support, using beam search for the best pattern extension, makes it possible to attain 62.07% specificity, and requires 21 patterns, with an average length of 4.19 items per pattern. This average is the lowest value of all the experiments carried out using the J48 classifier with expert discretization. Within these 21 patterns, we can find 2 patterns with only 2 items, which are used to classify the survivors:</p>
        <disp-formula><italic>DIUR</italic><sub>3</sub> &#60; <italic>BE</italic><sub>2</sub> (40.23% PROTECTION) (43 deaths, 150 survivors)</disp-formula>
        <disp-formula><italic>INC</italic><sub>2</sub> = <italic>PH</italic><sub>3</sub> (43.58% PROTECTION) (35 deaths, 176 survivors)</disp-formula>
        <p>The first pattern, <italic>DIUR</italic><sub>3</sub> &#60; <italic>BE</italic><sub>2</sub>, is interesting because if the <italic>PH</italic> is very high the next day and has the extension <italic>DIUR</italic><sub>3</sub> &#60; <italic>BE</italic><sub>2</sub> &#60; <italic>PH</italic><sub>4</sub> (78.85% PROTECTION; 5 deaths, 70 survivors), the patient survival rate increases by 38.62%.</p>
        <p>Furthermore, we have discovered a pattern with which to classify the nonsurvivors that can also be found in the J48 tree classifiers of the subsequent experiments, and that was not selected in the classification algorithms used in the previous experiments:</p>
        <disp-formula><italic>p<sub>i</sub></italic><sub>1</sub> = <italic>BIC</italic><sub>1</sub> &#60; <italic>BIC</italic><sub>1</sub> &#60; <italic>PH</italic><sub>1</sub> (98.87% RISK; 9 deaths, 0 survivors)</disp-formula>
        <p>This pattern has a DOR value of DOR(<italic>p<sub>i</sub></italic><sub>1</sub>) = 87.12, with a risk probability of <italic>R</italic>(<italic>p<sub>i</sub></italic><sub>1</sub>) = 98.87%. It has been selected because its super-pattern <italic>p<sub>i</sub></italic> = <italic>BIC</italic><sub>1</sub> &#60; <italic>BIC</italic><sub>2</sub> (44 deaths, 111 survivors) has a DOR value of DOR(<italic>p<sub>i</sub></italic>) = 2.46, with a risk probability of <italic>R</italic>(<italic>p<sub>i</sub></italic>) = 71.1%. This signifies that there is an increase in the risk of <italic>R</italic>(<italic>p<sub>i</sub></italic><sub>1</sub>) – <italic>R</italic>(<italic>p<sub>i</sub></italic>) = 27.77%, which is higher than the 25% fixed threshold.</p>
      </sec>
      <sec>
        <title>Experiment 3: Using the Nonoverlapping of the CI of the DOR</title>
        <p>In this experiment, we have selected patterns based on the nonoverlapping of 95% CI of the DOR (as stated in [<xref ref-type="bibr" rid="ref38">38</xref>]). In addition, only patterns whose CI does not include the value 1 have been included in the output (as occurred in [<xref ref-type="bibr" rid="ref40">40</xref>]). All the patterns are, therefore, either a protective factor or a risk factor, but not both or undetermined.</p>
        <p><xref ref-type="table" rid="table11">Table 11</xref> shows the results obtained when we maintain all the pattern extensions, while <xref ref-type="table" rid="table12">Table 12</xref> shows the results obtained when only the best pattern extension is chosen using beam search.</p>
        <p>We also obtain a reduced number of patterns with respect to the previous experiment (<xref ref-type="table" rid="table4">Table 4</xref>), and an advantage of this experiment is that this number does not depend on a threshold value.</p>
        <p>In general, the classification performance is similar to that of the previous experiments, although with the JRIP classification using expert discretization, we obtain better results when selecting only the best child.</p>
        <p>The J48 classification tree used to classify with expert discretization, and 8% support, using beam search for best pattern extension, allows us to obtain 58.62% specificity and a higher sensitivity than the previous experiment: 16 patterns are required.</p>
        <p>One of the shortest patterns that we find in the J48 classification tree is:</p>
        <disp-formula><italic>PH</italic><sub>4</sub> &#60; <italic>PH</italic><sub>4</sub> &#60; <italic>BE</italic><sub>1</sub> (6 deaths, 1 survivor)</disp-formula>
        <p>This pattern has a DOR value of 27.93 in the interval (6.71, 116.26). Its super-pattern <italic>PH</italic><sub>4</sub> &#60; <italic>PH</italic><sub>4</sub> (14 deaths, 109 survivors) has a DOR value of 0.47 in the interval (0.26, 0.87). Note that the CIs of these patterns do not overlap.</p>
        <table-wrap position="float" id="table11">
          <label>Table 11</label>
          <caption>
            <p>Results of Experiment 3a using the nonoverlapping CI of DOR<sup>a</sup> (keeping all pattern extensions).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="30"/>
            <col width="140"/>
            <col width="160"/>
            <col width="120"/>
            <col width="140"/>
            <col width="110"/>
            <col width="110"/>
            <col width="100"/>
            <col width="60"/>
            <thead>
              <tr valign="top">
                <td colspan="3">Classifier, discretization, and pattern support (%)</td>
                <td>Number of patterns</td>
                <td>Total length (items)</td>
                <td>Average length (items/pattern)</td>
                <td>Sensitivity (%)</td>
                <td>Specificity (%)</td>
                <td>Accuracy (%)</td>
                <td>AUC<sup>b</sup></td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="3">
                  <bold>J48</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>Expert</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="7">
                  <break/>
                </td>
                <td rowspan="3">
                  <break/>
                </td>
                <td>10</td>
                <td>10</td>
                <td>41</td>
                <td>4.1</td>
                <td>93.92</td>
                <td>48.28</td>
                <td>85.38</td>
                <td>0.721</td>
              </tr>
              <tr valign="top">
                <td>
                  8
                </td>
                <td>
                  16
                </td>
                <td>
                 77
                </td>
                <td>
                  4.81
                </td>
                <td>94.97</td>
                <td>58.62</td>
                <td>88.17</td>
                <td>0.741</td>
              </tr>
              <tr valign="top">
                <td>
                  6
                </td>
                <td>
                  18
                </td>
                <td>
                  90
                </td>
                <td>
                  5
                </td>
                <td>
                  96.56
                </td>
                <td>
                 56.32
                </td>
                <td>
                  89.03
                </td>
                <td>
                  0.768
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>UCPD<sup>c</sup></bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="3">
                  <break/>
                </td>
                <td>
                  16
                </td>
                <td>
                  18
                </td>
                <td>
                  70
                </td>
                <td>
                 3.89
                </td>
                <td>
                  97.35
                </td>
                <td>
                  57.47
                </td>
                <td>
                  89.89
                </td>
                <td>
                  0.794
                </td>
              </tr>
              <tr valign="top">
                <td>
                  14
                </td>
                <td>
                 11
                </td>
                <td>
                  43
                </td>
                <td>
                  3.91
                </td>
                <td>99.74</td>
                <td>62.07</td>
                <td>92.69</td>
                <td>0.803</td>
              </tr>
              <tr valign="top">
                <td>
                  12
                </td>
                <td>
                  11
                </td>
                <td>
                  47
                </td>
                <td>
                  4.27
                </td>
                <td>
                  100.00
                </td>
                <td>
                  57.47
                </td>
                <td>
                  92.04
                </td>
                <td>
                  0.786
                </td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>JRIP</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>Expert</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="7">
                  <break/>
                </td>
                <td rowspan="3">
                  <break/>
                </td>
                <td>10</td>
                <td>11</td>
                <td>37</td>
                <td>3.36</td>
                <td>93.65</td>
                <td>41.38</td>
                <td>83.87</td>
                <td>0.682</td>
              </tr>
              <tr valign="top">
                <td>
                  8
                </td>
                <td>
                  13
                </td>
                <td>
                  60
                </td>
                <td>
                 4.62
                </td>
                <td>
                  91.80
                </td>
                <td>
                 33.33
                </td>
                <td>
                  80.86
                </td>
                <td>
                 0.641
                </td>
              </tr>
              <tr valign="top">
                <td>
                  6
                </td>
                <td>
                  7
                </td>
                <td>
                  30
                </td>
                <td>
                  4.29
                </td>
                <td>
                  96.56
                </td>
                <td>
                  42.53
                </td>
                <td>
                  86.45
                </td>
                <td>
                  0.722
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>UCPD</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="3">
                  <break/>
                </td>
                <td>
                  16
                </td>
                <td>
                  6
                </td>
                <td>
                 23
                </td>
                <td>
                  3.83
                </td>
                <td>
                  96.30
                </td>
                <td>
                 41.38
                </td>
                <td>
                  86.02
                </td>
                <td>
                  0.727
                </td>
              </tr>
              <tr valign="top">
                <td>
                 14
                </td>
                <td>
                  9
                </td>
                <td>
                  33
                </td>
                <td>
                  3.67
                </td>
                <td>
                  98.94
                </td>
                <td>
                 56.32
                </td>
                <td>
                  90.97
                </td>
                <td>
                  0.803
                </td>
              </tr>
              <tr valign="top">
                <td>
                 12
                </td>
                <td>
                 14
                </td>
                <td>
                  58
                </td>
                <td>
                  4.14
                </td>
                <td>96.30</td>
                <td>60.92</td>
                <td>89.68</td>
                <td>0.793</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table11fn1">
              <p><sup>a</sup>DOR: diagnostic odds ratio.</p>
            </fn>
            <fn id="table11fn2">
              <p><sup>b</sup>AUC: area under the receiver operating characteristic curve.</p>
            </fn>
            <fn id="table11fn3">
              <p><sup>c</sup>UCPD: unsupervised correlation preserving discretization.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <table-wrap position="float" id="table12">
          <label>Table 12</label>
          <caption>
            <p>Results of Experiment 3b using the nonoverlapping CI of DOR<sup>a</sup> (using beam search for best pattern extension).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="30"/>
            <col width="140"/>
            <col width="160"/>
            <col width="120"/>
            <col width="140"/>
            <col width="110"/>
            <col width="110"/>
            <col width="100"/>
            <col width="60"/>
            <thead>
              <tr valign="top">
                <td colspan="3">Classifier, discretization, and pattern support (%)</td>
                <td>Number of patterns</td>
                <td>Total length (items)</td>
                <td>Average length (items/pattern)</td>
                <td>Sensitivity (%)</td>
                <td>Specificity (%)</td>
                <td>Accuracy (%)</td>
                <td>AUC<sup>b</sup></td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="3">
                  <bold>J48</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>Expert</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="7">
                  <break/>
                </td>
                <td rowspan="3">
                  <break/>
                </td>
                <td>10</td>
                <td>10</td>
                <td>41</td>
                <td>4.1</td>
                <td>94.18</td>
                <td>51.72</td>
                <td>86.24</td>
                <td>0.742</td>
              </tr>
              <tr valign="top">
                <td>
                  8
                </td>
                <td>
                 16
                </td>
                <td>
                  77
                </td>
                <td>
                 4.81
                </td>
                <td>94.71</td>
                <td>58.62</td>
                <td>87.96</td>
                <td>0.739</td>
              </tr>
              <tr valign="top">
                <td>
                  6
                </td>
                <td>
                 18
                </td>
                <td>
                 90
                </td>
                <td>
                 5
                </td>
                <td>
                  96.83
                </td>
                <td>
                  55.17
                </td>
                <td>
                  89.03
                </td>
                <td>
                  0.758
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>UCPD<sup>c</sup></bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="3">
                  <break/>
                </td>
                <td>
                  16
                </td>
                <td>
                 16
                </td>
                <td>
                  68
                </td>
                <td>
                  4.25
                </td>
                <td>
                  96.30
                </td>
                <td>
                 55.17
                </td>
                <td>
                  88.60
                </td>
                <td>
                  0.798
                </td>
              </tr>
              <tr valign="top">
                <td>
                  14
                </td>
                <td>
                  13
                </td>
                <td>
                 51
                </td>
                <td>
                  3.92
                </td>
                <td>100.00</td>
                <td>62.07</td>
                <td>92.90</td>
                <td>0.795</td>
              </tr>
              <tr valign="top">
                <td>
                 12
                </td>
                <td>
                  11
                </td>
                <td>
                  45
                </td>
                <td>
                  4.09
                </td>
                <td>
                  100.00
                </td>
                <td>
                  60.92
                </td>
                <td>
                  92.69
                </td>
                <td>
                  0.812
                </td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>JRIP</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>Expert</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="7">
                  <break/>
                </td>
                <td rowspan="3">
                  <break/>
                </td>
                <td>10</td>
                <td>6</td>
                <td>20</td>
                <td>3.33</td>
                <td>94.44</td>
                <td>48.28</td>
                <td>85.81</td>
                <td>0.735</td>
              </tr>
              <tr valign="top">
                <td>
                  8
                </td>
                <td>
                  16
                </td>
                <td>
                  62
                </td>
                <td>
                  3.88
                </td>
                <td>
                  95.24
                </td>
                <td>
                  41.38
                </td>
                <td>
                  85.16
                </td>
                <td>
                 0.700
                </td>
              </tr>
              <tr valign="top">
                <td>
                  6
                </td>
                <td>
                  12
                </td>
                <td>
                  51
                </td>
                <td>4.25</td>
                <td>95.77</td>
                <td>52.87</td>
                <td>87.74</td>
                <td>0.747</td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>UCPD</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="3">
                  <break/>
                </td>
                <td>
                 16
                </td>
                <td>
                 16
                </td>
                <td>
                  66
                </td>
                <td>
                  4.13
                </td>
                <td>
                  95.50
                </td>
                <td>
                  40.23
                </td>
                <td>
                  85.16
                </td>
                <td>
                  0.695
                </td>
              </tr>
              <tr valign="top">
                <td>
                  14
                </td>
                <td>
                  12
                </td>
                <td>
                 44
                </td>
                <td>
                  3.67
                </td>
                <td>
                  97.88
                </td>
                <td>
                  54.02
                </td>
                <td>
                  89.68
                </td>
                <td>
                  0.747
                </td>
              </tr>
              <tr valign="top">
                <td>
                  12
                </td>
                <td>
                  15
                </td>
                <td>
                  60
                </td>
                <td>
                  4
                </td>
                <td>99.21</td>
                <td>55.17</td>
                <td>90.97</td>
                <td>0.788</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table12fn1">
              <p><sup>a</sup>DOR: diagnostic odds ratio.</p>
            </fn>
            <fn id="table12fn2">
              <p><sup>b</sup>AUC: area under the receiver operating characteristic curve.</p>
            </fn>
            <fn id="table12fn3">
              <p><sup>c</sup>UCPD: unsupervised correlation preserving discretization.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Experiment 4: Using a Differential DOR With the Nonoverlapping of the CI</title>
        <p>The last proposal consists of using the previous 2 approaches together (Experiments 2 and 3), signifying that we prune the patterns based on the overlapping of the CI of the DOR, and also based on the difference between the risk (or protection) factor probabilities. In both cases, we maintain the same thresholds.</p>
        <p>In this experiment we substantially reduced the number of patterns generated (<xref ref-type="table" rid="table4">Table 4</xref>). For example, in the case of expert discretization and 8% support (keeping all pattern extensions), we obtained only 701 patterns with this experiment, which is a decrease of 68.06% from nonoverlapping DOR (with 2195 patterns) and a decrease of 85.78% with respect to the baseline experiment (with 4931 patterns).</p>
        <p>It is necessary to consider that if the number of patterns is too low, we do not usually achieve a good classification result. But with this experiment, for example, with 8% support, expert discretization, and the J48 classifier, with only 504 patterns, we have obtained a similar result to previous ones, using only 13 patterns in the classifier, with a sensitivity of 96.30% and a specificity of 57.47% in the beam search for the best pattern extension (<xref ref-type="table" rid="table13">Table 13</xref>). This is the lowest number of patterns required with expert discretization and the J48 classifier, with a total length of only 55 items.</p>
        <p>The classification performance, as shown in <xref ref-type="table" rid="table13">Tables 13</xref> and <xref ref-type="table" rid="table14">14</xref>, is similar to that of the previous experiments.</p>
        <p>Let us now analyze the pattern that is selected in this experiment and in all the previous experiments: <italic>DIUR</italic><sub>3</sub> &#60; <italic>INC</italic><sub>0</sub> &#60; <italic>INC</italic><sub>0</sub> &#60; <italic>DIUR</italic><sub>3</sub> (10 deaths, 0 survivors). It has a DOR value of 98.05 in the interval (24.21, 397.18), with a risk probability of 98.99%. Its super-pattern <italic>DIUR</italic><sub>3</sub> &#60; <italic>INC</italic><sub>0</sub> &#60; <italic>INC</italic><sub>0</sub> has a DOR value of 2.07 in the interval (1.20, 3.57) with a risk probability of 67.39%, signifying that there is no overlapping in the CI, and that there is an increase in the risk probability of 31.6%.</p>
        <table-wrap position="float" id="table13">
          <label>Table 13</label>
          <caption>
            <p>Results of Experiment 4b using the differential DOR<sup>a</sup> and the nonoverlapping CI (using beam search for best pattern extension).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="30"/>
            <col width="140"/>
            <col width="160"/>
            <col width="120"/>
            <col width="140"/>
            <col width="110"/>
            <col width="110"/>
            <col width="100"/>
            <col width="60"/>
            <thead>
              <tr valign="top">
                <td colspan="3">Classifier, discretization, and pattern support (%)</td>
                <td>Number of patterns</td>
                <td>Total length (items)</td>
                <td>Average length (items/pattern)</td>
                <td>Sensitivity (%)</td>
                <td>Specificity (%)</td>
                <td>Accuracy (%)</td>
                <td>AUC<sup>b</sup></td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="3">
                  <bold>J48</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>Expert</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="7">
                  <break/>
                </td>
                <td rowspan="3">
                  <break/>
                </td>
                <td>10</td>
                <td>10</td>
                <td>35</td>
                <td>3.5</td>
                <td>95.50</td>
                <td>41.38</td>
                <td>85.38</td>
                <td>0.694</td>
              </tr>
              <tr valign="top">
                <td>
                  8
                </td>
                <td>13</td>
                <td>55</td>
                <td>
                  4.23
                </td>
                <td>96.30</td>
                <td>57.47</td>
                <td>89.03</td>
                <td>0.770</td>
              </tr>
              <tr valign="top">
                <td>
                  6
                </td>
                <td>
                  16
                </td>
                <td>
                  75
                </td>
                <td>
                  4.69
                </td>
                <td>
                  98.41
                </td>
                <td>
                  50.57
                </td>
                <td>
                  89.46
                </td>
                <td>
                  0.739
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>UCPD<sup>c</sup></bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="3">
                  <break/>
                </td>
                <td>
                 16
                </td>
                <td>
                  20
                </td>
                <td>
                  74
                </td>
                <td>
                  3.7
                </td>
                <td>
                  93.92
                </td>
                <td>
                  50.57
                </td>
                <td>
                  85.81
                </td>
                <td>
                 0.758
                </td>
              </tr>
              <tr valign="top">
                <td>
                  14
                </td>
                <td>
                  7
                </td>
                <td>
                  28
                </td>
                <td>
                  4
                </td>
                <td>
                  96.83
                </td>
                <td>
                  58.62
                </td>
                <td>
                  89.68
                </td>
                <td>
                  0.808
                </td>
              </tr>
              <tr valign="top">
                <td>
                  12
                </td>
                <td>
                  12
                </td>
                <td>
                  50
                </td>
                <td>
                  4.17
                </td>
                <td>100.00</td>
                <td>59.77</td>
                <td>92.47</td>
                <td>0.812</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>JRIP</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>Expert</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="7">
                  <break/>
                </td>
                <td rowspan="3">
                  <break/>
                </td>
                <td>10</td>
                <td>6</td>
                <td>21</td>
                <td>3.5</td>
                <td>92.59</td>
                <td>25.29</td>
                <td>80.00</td>
                <td>0.597</td>
              </tr>
              <tr valign="top">
                <td>
                 8
                </td>
                <td>
                  14
                </td>
                <td>
                  43
                </td>
                <td>
                  3.07
                </td>
                <td>
                  91.80
                </td>
                <td>
                  29.89
                </td>
                <td>
                  80.22
                </td>
                <td>
                  0.614
                </td>
              </tr>
              <tr valign="top">
                <td>
                  6
                </td>
                <td>
                  15
                </td>
                <td>
                  57
                </td>
                <td>
                  3.8
                </td>
                <td>
                  92.59
                </td>
                <td>
                  29.89
                </td>
                <td>
                  80.86
                </td>
                <td>
                  0.626
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>UCPD</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="3">
                  <break/>
                </td>
                <td>
                 16
                </td>
                <td>
                 10
                </td>
                <td>
                  37
                </td>
                <td>
                  3.7
                </td>
                <td>
                  96.83
                </td>
                <td>
                  35.63
                </td>
                <td>
                  85.38
                </td>
                <td>
                  0.671
                </td>
              </tr>
              <tr valign="top">
                <td>
                  14
                </td>
                <td>
                  10
                </td>
                <td>
                  36
                </td>
                <td>
                  3.6
                </td>
                <td>
                  98.68
                </td>
                <td>
                  32.18
                </td>
                <td>
                  86.24
                </td>
                <td>
                  0.673
                </td>
              </tr>
              <tr valign="top">
                <td>
                  12
                </td>
                <td>
                  15
                </td>
                <td>
                  59
                </td>
                <td>
                  3.93
                </td>
                <td>98.68</td>
                <td>50.57</td>
                <td>89.68</td>
                <td>0.759</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table13fn1">
              <p><sup>a</sup>DOR: diagnostic odds ratio.</p>
            </fn>
            <fn id="table13fn2">
              <p><sup>b</sup>AUC: area under the receiver operating characteristic curve.</p>
            </fn>
            <fn id="table13fn3">
              <p><sup>c</sup>UCPD: unsupervised correlation preserving discretization.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <table-wrap position="float" id="table14">
          <label>Table 14</label>
          <caption>
            <p>Results of Experiment 4a using the differential DOR<sup>a</sup> and the nonoverlapping CI (keeping all pattern extensions).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="30"/>
            <col width="140"/>
            <col width="160"/>
            <col width="120"/>
            <col width="140"/>
            <col width="110"/>
            <col width="110"/>
            <col width="100"/>
            <col width="60"/>
            <thead>
              <tr valign="top">
                <td colspan="3">Classifier, discretization, and pattern support (%)</td>
                <td>Number of patterns</td>
                <td>Total length (items)</td>
                <td>Average length (items/pattern)</td>
                <td>Sensitivity (%)</td>
                <td>Specificity (%)</td>
                <td>Accuracy (%)</td>
                <td>AUC<sup>b</sup></td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="3">
                  <bold>J48</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>Expert</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="7">
                  <break/>
                </td>
                <td rowspan="3">
                  <break/>
                </td>
                <td>10</td>
                <td>13</td>
                <td>42</td>
                <td>3.23</td>
                <td>94.18</td>
                <td>44.83</td>
                <td>84.95</td>
                <td>0.672</td>
              </tr>
              <tr valign="top">
                <td>
                  8
                </td>
                <td>13</td>
                <td>55</td>
                <td>
                 4.23
                </td>
                <td>95.50</td>
                <td>55.17</td>
                <td>87.96</td>
                <td>0.743</td>
              </tr>
              <tr valign="top">
                <td>
                  6
                </td>
                <td>
                  17
                </td>
                <td>
                  78
                </td>
                <td>
                  4.59
                </td>
                <td>
                  97.88
                </td>
                <td>
                  47.13
                </td>
                <td>
                  88.39
                </td>
                <td>
                  0.711
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>UCPD<sup>c</sup></bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="3">
                  <break/>
                </td>
                <td>
                  16
                </td>
                <td>
                  20
                </td>
                <td>
                  74
                </td>
                <td>
                  3.7
                </td>
                <td>
                  94.97
                </td>
                <td>
                  50.57
                </td>
                <td>
                  86.67
                </td>
                <td>
                  0.761
                </td>
              </tr>
              <tr valign="top">
                <td>
                  14
                </td>
                <td>
                  7
                </td>
                <td>
                  28
                </td>
                <td>
                  4
                </td>
                <td>
                  98.41
                </td>
                <td>
                  58.62
                </td>
                <td>
                  90.97
                </td>
                <td>
                  0.804
                </td>
              </tr>
              <tr valign="top">
                <td>
                  12
                </td>
                <td>
                  12
                </td>
                <td>
                  50
                </td>
                <td>
                  4.17
                </td>
                <td>100.00</td>
                <td>65.52</td>
                <td>93.55</td>
                <td>0.820</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>JRIP</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">
                  <bold>Expert</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="7">
                  <break/>
                </td>
                <td rowspan="3">
                  <break/>
                </td>
                <td>10</td>
                <td>4</td>
                <td>13</td>
                <td>3.25</td>
                <td>93.12</td>
                <td>29.89</td>
                <td>81.29</td>
                <td>0.622</td>
              </tr>
              <tr valign="top">
                <td>
                  8
                </td>
                <td>
                  12
                </td>
                <td>
                  40
                </td>
                <td>
                  3.33
                </td>
                <td>
                  94.44
                </td>
                <td>
                  29.89
                </td>
                <td>
                  82.37
                </td>
                <td>
                  0.625
                </td>
              </tr>
              <tr valign="top">
                <td>
                  6
                </td>
                <td>
                  20
                </td>
                <td>
                  74
                </td>
                <td>
                  3.7
                </td>
                <td>
                  91.80
                </td>
                <td>
                  39.08
                </td>
                <td>
                  81.94
                </td>
                <td>
                  0.668
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>UCPD</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="3">
                  <break/>
                </td>
                <td>
                  16
                </td>
                <td>
                  7
                </td>
                <td>
                  24
                </td>
                <td>
                  3.43
                </td>
                <td>
                  94.44
                </td>
                <td>
                  27.59
                </td>
                <td>
                  81.94
                </td>
                <td>
                  0.632
                </td>
              </tr>
              <tr valign="top">
                <td>
                   14
                </td>
                <td>
                  6
                </td>
                <td>
                  23
                </td>
                <td>
                  3.83
                </td>
                <td>
                  97.35
                </td>
                <td>
                  32.18
                </td>
                <td>
                  85.16
                </td>
                <td>
                  0.653
                </td>
              </tr>
              <tr valign="top">
                <td>
                  12
                </td>
                <td>
                  16
                </td>
                <td>
                  63
                </td>
                <td>
                  3.94
                </td>
                <td>98.68</td>
                <td>59.77</td>
                <td>91.40</td>
                <td>0.795</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table14fn1">
              <p><sup>a</sup>DOR: diagnostic odds ratio.</p>
            </fn>
            <fn id="table14fn2">
              <p><sup>b</sup>AUC: area under the receiver operating characteristic curve.</p>
            </fn>
            <fn id="table14fn3">
              <p><sup>c</sup>UCPD: unsupervised correlation preserving discretization.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>We have proposed different ways of using the DOR as a single indicator of diagnostic performance, by carrying out a classification of the survival of patients in an ICBU by studying their daily evolution using multivariate sequential patterns. We now discuss the factors that we have to consider to have a trade-off mainly between interpretability and classification performance.</p>
        <p>In relation to interpretability, a model is more interpretable than another model if its decisions are easier for a human to comprehend than decisions from the other model. In this sense, the presented method shows 3 advantages: (1) the readability and interpretability of the content of the patterns, (2) the reduced length of the patterns, and (3) the small set of significant patterns selected to build the classifier.</p>
        <p>Of these 3 advantages, the most direct one for the clinician is that the patterns themselves have an interpretation in the language understood by the clinician, who does not have to spend time looking for a correspondence between what he/she reads in the pattern and his/her usual way of working. Moreover, the definition of the patterns provides not only static information about the patient at admission time, as is usual, but also the evolution of the patient. For example, a pattern like <italic>DIUR</italic><sub>3</sub> &#60; <italic>INC</italic><sub>0</sub> &#60; <italic>INC</italic><sub>0</sub> &#60; <italic>DIUR</italic><sub>3</sub> leads the clinician to the clinical factors related to the pattern: high diuresis and very low incomings during 4 different days.</p>
        <p>For the second factor, if we study the length of the patterns eventually selected (<xref ref-type="table" rid="table5">Table 5</xref>), it will be noted that the majority of the patterns in the baseline experiment (using JEPs) and in the first experiment (using DOR) have a length of 6 items, whereas the majority of the patterns in the subsequent experiments have a length of 4 items. We can observe that the distribution of patterns by length has changed, with a larger number of shorter patterns in the last experiments, which are more difficult to use in a classifier, because they are more general. In subsequent Experiments 2-4, we have observed that, on the one hand, the classifier is less accurate. On the other hand, the shorter patterns are easier to understand, more general, and describe the population well, but simultaneously cover survivors and nonsurvivors.</p>
        <p>Overall, these shorter patterns produce worse classification results when we use expert discretization with a JRIP classifier. On the one hand, expert discretization generally performs worse, because it is not based on a statistical or information theory that has been specifically designed for classification purposes, and on the other hand, JRIP provides the best performance in terms of the complexity of the tree structure, while J48 produces a high classification accuracy (as the authors explain in [<xref ref-type="bibr" rid="ref43">43</xref>]). With shorter patterns, however, it is easier to interpret the meaning of the patterns and explain their behavior.</p>
        <p>With respect to the third factor, we could say that a model that allows us to achieve a good classification result with a low number of patterns (and consequently of items) is, therefore, preferable. In <xref ref-type="table" rid="table4">Table 4</xref> we obtained the smallest number of patterns with Experiment 4 (using a differential DOR and the nonoverlapping of the CI). These patterns are simultaneously restricted by these 2 conditions, and as we have selected a small number of patterns, it might even be interesting to carry out a manual revision and a study of them (although that is out of the scope of this work).</p>
        <p>The baseline experiment (using JEPs) and Experiment 3 (nonoverlapping CI of DOR) do not depend on a threshold value and we also obtain a reasonably small number of patterns. Nevertheless, the threshold value that has been established in the other experiments (Experiments 1, 2, and 4) leads to changes in the number of patterns eventually selected. We have therefore made 2 variations in Experiment 1 (using DOR), by restricting the minimum DOR value that is necessary to select patterns (<xref ref-type="table" rid="table8">Table 8</xref>), signifying that we have been able to significantly reduce the number of patterns selected.</p>
        <p>When we work with imbalanced data, as is usual in medical domains, it is necessary to highlight the correct classification of rarely occurring cases when compared with other general cases. It is consequently necessary to check the highest specificity to choose the best classification result, which in our experiments is produced by using UCPD automatic discretization with JEPs as a classical frequency-based discriminative measure. JEPs have usually been used to build accurate classifiers, while UCPD exploits the underlying correlation structure in the data so as to obtain the discrete intervals and ensure that the inherent correlations are preserved.</p>
        <p>Moreover, we have generally shown that this automatic discretization performs better classifications than expert discretization. But clinicians prefer to use a reference range discretization for laboratory and physiologic values. This signifies that, for example, they prefer to use the interval (7.35, 7.45) as a normal value for <italic>PH</italic>, as it is usually managed in medicine. The interpretability of the classification results by using expert discretization is, therefore, a prevailing factor in our choice. A summary of the principal results of the experiments using only expert discretization is shown in <xref ref-type="table" rid="table15">Table 15</xref>.</p>
        <table-wrap position="float" id="table15">
          <label>Table 15</label>
          <caption>
            <p>Comparison of experimental results with the highest specificity using expert discretization.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="110"/>
            <col width="130"/>
            <col width="140"/>
            <col width="110"/>
            <col width="130"/>
            <col width="100"/>
            <col width="100"/>
            <col width="90"/>
            <col width="60"/>
            <thead>
              <tr valign="top">
                <td colspan="3">Experiment, classifier, and pattern support (%)</td>
                <td>Number of patterns</td>
                <td>Total length (items)</td>
                <td>Average length (items/pattern)</td>
                <td>Sensitivity (%)</td>
                <td>Specificity (%)</td>
                <td>Accuracy (%)</td>
                <td>AUC<sup>a</sup></td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="3">
                  <bold>JEPs<sup>b</sup></bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="2">
                  <break/>
                </td>
                <td>J48</td>
                <td>8</td>
                <td>17</td>
                <td>84</td>
                <td>4.94</td>
                <td>100.00</td>
                <td>56.32</td>
                <td>91.83</td>
                <td>0.782</td>
              </tr>
              <tr valign="top">
                <td>JRIP</td>
                <td>8</td>
                <td>15</td>
                <td>79</td>
                <td>5.27</td>
                <td>100.00</td>
                <td>58.62</td>
                <td>92.26</td>
                <td>0.777</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>1b: DOR<sup>c</sup></bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="2">
                  <break/>
                </td>
                <td>J48</td>
                <td>8</td>
                <td>17</td>
                <td>84</td>
                <td>4.94</td>
                <td>94.18</td>
                <td>55.17</td>
                <td>86.88</td>
                <td>0.767</td>
              </tr>
              <tr valign="top">
                <td>JRIP</td>
                <td>8</td>
                <td>14</td>
                <td>67</td>
                <td>4.79</td>
                <td>95.50</td>
                <td>62.07</td>
                <td>89.25</td>
                <td>0.801</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>2b: Differential DOR</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="2">
                  <break/>
                </td>
                <td>J48</td>
                <td>8</td>
                <td>21</td>
                <td>88</td>
                <td>4.19</td>
                <td>87.57</td>
                <td>62.07</td>
                <td>82.80</td>
                <td>0.783</td>
              </tr>
              <tr valign="top">
                <td>JRIP</td>
                <td>6</td>
                <td>8</td>
                <td>29</td>
                <td>3.62</td>
                <td>91.53</td>
                <td>31.03</td>
                <td>80.22</td>
                <td>0.623</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>3b: Nonoverlapping DOR</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="2">
                  <break/>
                </td>
                <td>J48</td>
                <td>8</td>
                <td>16</td>
                <td>77</td>
                <td>4.81</td>
                <td>94.71</td>
                <td>58.62</td>
                <td>87.96</td>
                <td>0.739</td>
              </tr>
              <tr valign="top">
                <td>JRIP</td>
                <td>6</td>
                <td>12</td>
                <td>51</td>
                <td>4.25</td>
                <td>95.77</td>
                <td>52.87</td>
                <td>87.74</td>
                <td>0.747</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>4b: Differential + nonoverlapping DOR</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="2">
                  <break/>
                </td>
                <td>J48</td>
                <td>8</td>
                <td>13</td>
                <td>55</td>
                <td>4.23</td>
                <td>96.30</td>
                <td>57.47</td>
                <td>89.03</td>
                <td>0.770</td>
              </tr>
              <tr valign="top">
                <td>JRIP</td>
                <td>6</td>
                <td>15</td>
                <td>57</td>
                <td>3.8</td>
                <td>92.59</td>
                <td>29.89</td>
                <td>80.86</td>
                <td>0.626</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table15fn1">
              <p><sup>a</sup>AUC: area under the receiver operating characteristic curve.</p>
            </fn>
            <fn id="table15fn2">
              <p><sup>b</sup>JEP: Jumping Emerging Pattern.</p>
            </fn>
            <fn id="table15fn3">
              <p><sup>c</sup>DOR: diagnostic odds ratio.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>If we therefore consider only expert discretization, the best classification result is achieved in Experiment 1b (using DOR), with a specificity of 62.07% and an AUC value of 0.801 (<xref ref-type="table" rid="table8">Table 8</xref>). In this experiment we simultaneously obtained patterns found in both the survivors and the nonsurvivors based on only the DOR value of each pattern.</p>
        <p>The classification model that is easiest to comprehend and has high specificity requires only 5 patterns (with a total length of 20 items) and is achieved with UCPD and a JRIP classifier in Experiment 2b (differential DOR) using beam search for the best pattern. It obtains a specificity of 56.32% and an AUC value of 0.767 (<xref ref-type="table" rid="table10">Table 10</xref>).</p>
        <p>If we take into consideration only expert discretization, with a J48 classifier we need at least 13 patterns (with a total length of 55 items) to obtain a specificity of 57.47% and an AUC value of 0.770 (<xref ref-type="table" rid="table13">Table 13</xref>) in Experiment 4b (using a differential and a nonoverlapping DOR).</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>In this research, we have developed a model to predict the survival of patients by considering 2 aspects: the relevance of the temporal evolution of the patients as part of the model and an interpretable model for the physicians. We have achieved these aspects by (1) using the multivariate sequential patterns used in classification models that can be easily understood by experts, (2) using a reduced number of patterns, and (3) using a language that is well known by clinicians with regard to both the discretization of values and measures of interest of the patterns.</p>
        <p>The main contribution of this work is the proposal and evaluation of 4 ways in which to employ DOR to reduce the number of patterns and to select only the most discriminative ones, because pattern explosion is a principal problem in pattern-based classifiers. We have compared the 4 proposals with a baseline experiment using JEPs. This is, to the best of our knowledge, the first time that some of these approaches have been proposed and compared in scientific literature.</p>
        <p>With regard to the number of patterns, the best option is that of using both a differential and a nonoverlapping DOR (as in Experiment 4). As we have increased the restrictions applied, we have significantly reduced the number of patterns, thus attaining more general, simple, and interesting patterns. With expert discretization and 10% support, there are, for example, only 198 patterns (using beam search for best pattern), and, very interestingly, these patterns cover all the patients who did not survive. Despite not being within the scope of this paper, it would be interesting for a clinician to carry out a manual interpretation of these patterns.</p>
        <p>This experiment provides the second contribution of this paper, because we have shown that beam search with the DOR could be used in the algorithm to extract sequential patterns for classification rather than using a traditional algorithm for sequential pattern mining.</p>
        <p>Despite the efforts made to reduce the number and the length of patterns in Experiments 2-4, in which we have compared each pattern with its extensions, the classifier built is less accurate. The shorter patterns are easier to understand, more general, and describe the population well, but simultaneously cover survivors and nonsurvivors.</p>
        <p>With regard to accuracy, the best classification results are, not surprisingly, produced using JEPs along with UCPD. JEPs have been extensively used to build accurate classifiers and produce better results when we use a discretization based on statistical or information theory that is specifically intended for classification. Nevertheless, we require interpretable patterns that are easy for the clinician to understand, and must therefore use a reference range discretization created by an expert. If we consider only expert discretization, the highest specificity is attained using only the DOR to select the patterns (as in Experiment 1; <xref ref-type="table" rid="table15">Table 15</xref>).</p>
        <p>With regard to interpretability, we can observe that discretization has a great impact on classification performance at the expense of interpretability, because more and longer patterns are required. With UCPD, we require only 5 patterns (with a total length of 20 items) to build a rule set and to obtain 56.32% specificity when we use the differential DOR (see Experiment 2). With expert discretization, we need at least 13 patterns (with a total length of 55 items) to obtain a specificity of 57.47% using both a differential and a nonoverlapping DOR to select the patterns (see Experiment 4).</p>
        <p>Our future research will consist of exploring domain-based measures to evaluate clinical patterns or to reduce the number of patterns in postprocessing to an even greater extent. In this respect, we intend to investigate more specific properties, such as closed, maximal, or minimal patterns as a trade-off between improving classification performance and not losing information or representativeness of the population. The researchers additionally intend to explore other measures and search strategies that could be integrated into new algorithms.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AUC</term>
          <def>
            <p>area under the receiver operating characteristic curve</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">CBA</term>
          <def>
            <p>Classification Based on Associations</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">CBS</term>
          <def>
            <p>Classify-By-Sequence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">CMAR</term>
          <def>
            <p>Classification Based on Multiple Association Rules</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">CPAR</term>
          <def>
            <p>Classification Based on Predictive Association Rules</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">DOR</term>
          <def>
            <p>diagnostic odds ratio</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">EP</term>
          <def>
            <p>emerging pattern</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">FN</term>
          <def>
            <p>false negative</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">FP</term>
          <def>
            <p>false positive</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">ICBU</term>
          <def>
            <p>intensive care burn unit</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">JEP</term>
          <def>
            <p>Jumping Emerging Pattern</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">MMAC</term>
          <def>
            <p>Multi-class, Multi-label Associative Classification</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb13">RIPPER</term>
          <def>
            <p>Repeated Incremental Pruning to Produce Error Reduction</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb14">SOFA</term>
          <def>
            <p>Sequential Organ Failure Assessment</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb15">TN</term>
          <def>
            <p>true negative</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb16">TP</term>
          <def>
            <p>true positive</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb17">UCPD</term>
          <def>
            <p>unsupervised correlation preserving discretization</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This work was partially funded by the SITSUS project (Ref: RTI2018-094832-B-I00), the CONFAINCE project (Ref: PID2021-122194OB-I00), supported by the Spanish Ministry of Science and Innovation and the Spanish Agency for Research (MCIN/AEI/10.13039/501100011033) and, as appropriate, by ERDF A way of making Europe.</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared. This work does not relate to the employment of AG at Amazon.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Batal</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Fradkin</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Harrison</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Moerchen</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Hauskrecht</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Mining Recent Temporal Patterns for Event Detection in Multivariate Time Series Data</article-title>
          <year>2012</year>
          <conf-name>Proceedings of the 18th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, KDD 2012</conf-name>
          <conf-date>August 12-16</conf-date>
          <conf-loc>Beijing, China</conf-loc>
          <publisher-name>ACM Press</publisher-name>
          <fpage>280</fpage>
          <lpage>288</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/25937993"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/2339530.2339578</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bringmann</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Nijssen</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zimmermann</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Pattern-Based Classification: A Unifying Perspective</article-title>
          <year>2009</year>
          <conf-name>From Local Patterns to Global Models: Proceedings of the ECML/PKDD-09 Workshop (LeGo-09)</conf-name>
          <conf-date>September 7-11</conf-date>
          <conf-loc>Bled, Slovenia</conf-loc>
          <fpage>36</fpage>
          <lpage>50</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/1111.6191"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fan</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Efficient Mining of Interesting Emerging Patterns and Their Effective Use in Classification (PhD thesis)</article-title>
          <source>The Department of Computer Science and Software Engineering, University of Melbourne</source>
          <year>2004</year>
          <access-date>2022-07-25</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://hdl.handle.net/11343/38912">http://hdl.handle.net/11343/38912</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Han</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Xin</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Yan</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Frequent pattern mining: current status and future directions</article-title>
          <source>Data Min Knowl Disc</source>
          <year>2007</year>
          <month>1</month>
          <day>27</day>
          <volume>15</volume>
          <issue>1</issue>
          <fpage>55</fpage>
          <lpage>86</lpage>
          <pub-id pub-id-type="doi">10.1007/s10618-006-0059-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>He</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Gu</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Conditional discriminative pattern mining: Concepts and algorithms</article-title>
          <source>Information Sciences</source>
          <year>2017</year>
          <month>01</month>
          <volume>375</volume>
          <fpage>1</fpage>
          <lpage>15</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ins.2016.09.047</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Glas</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Lijmer</surname>
              <given-names>JG</given-names>
            </name>
            <name name-style="western">
              <surname>Prins</surname>
              <given-names>MH</given-names>
            </name>
            <name name-style="western">
              <surname>Bonsel</surname>
              <given-names>GJ</given-names>
            </name>
            <name name-style="western">
              <surname>Bossuyt</surname>
              <given-names>PM</given-names>
            </name>
          </person-group>
          <article-title>The diagnostic odds ratio: a single indicator of test performance</article-title>
          <source>Journal of Clinical Epidemiology</source>
          <year>2003</year>
          <month>11</month>
          <volume>56</volume>
          <issue>11</issue>
          <fpage>1129</fpage>
          <lpage>1135</lpage>
          <pub-id pub-id-type="doi">10.1016/s0895-4356(03)00177-x</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Agrawal</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Srikant</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Mining sequential patterns</article-title>
          <source>Proceedings of the Eleventh International Conference on Data Engineering</source>
          <year>1995</year>
          <conf-name>Eleventh International Conference on Data Engineering</conf-name>
          <conf-date>March 6-10, 1995</conf-date>
          <conf-loc>Taipei, Taiwan</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>3</fpage>
          <lpage>14</lpage>
          <pub-id pub-id-type="doi">10.1109/icde.1995.380415</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Srikant</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Agrawal</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Apers</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Bouzeghoub</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gardarin</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Mining sequential patterns: Generalizations and performance improvements</article-title>
          <source>Advances in Database Technology — EDBT '96</source>
          <year>1996</year>
          <publisher-loc>Berlin, Heidelberg</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>1</fpage>
          <lpage>17</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zaki</surname>
              <given-names>MJ</given-names>
            </name>
          </person-group>
          <article-title>SPADE: an efficient algorithm for mining frequent sequences</article-title>
          <source>Machine Learning</source>
          <year>2001</year>
          <volume>42</volume>
          <issue>1/2</issue>
          <fpage>31</fpage>
          <lpage>60</lpage>
          <pub-id pub-id-type="doi">10.1007/3-540-45357-1_32</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <collab>Jian Pei</collab>
            <collab>Jiawei Han</collab>
            <name name-style="western">
              <surname>Mortazavi-Asl</surname>
              <given-names>B</given-names>
            </name>
            <collab>Jianyong Wang</collab>
            <name name-style="western">
              <surname>Pinto</surname>
              <given-names>H</given-names>
            </name>
            <collab>Qiming Chen</collab>
            <name name-style="western">
              <surname>Dayal</surname>
              <given-names>U</given-names>
            </name>
            <collab>Mei-Chun Hsu</collab>
          </person-group>
          <article-title>Mining sequential patterns by pattern-growth: the PrefixSpan approach</article-title>
          <source>IEEE Trans. Knowl. Data Eng</source>
          <year>2004</year>
          <month>11</month>
          <volume>16</volume>
          <issue>11</issue>
          <fpage>1424</fpage>
          <lpage>1440</lpage>
          <pub-id pub-id-type="doi">10.1109/tkde.2004.77</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gan</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Fournier-Viger</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Chao</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>PS</given-names>
            </name>
          </person-group>
          <article-title>A Survey of Parallel Sequential Pattern Mining</article-title>
          <source>ACM Trans. Knowl. Discov. Data</source>
          <year>2019</year>
          <month>07</month>
          <day>17</day>
          <volume>13</volume>
          <issue>3</issue>
          <fpage>1</fpage>
          <lpage>34</lpage>
          <pub-id pub-id-type="doi">10.1145/3314107</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Pei</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>CMAR: accurate and efficient classification based on multiple class-association rules</article-title>
          <source>IEEE Xplore</source>
          <year>2001</year>
          <conf-name>2001 IEEE International Conference on Data Mining</conf-name>
          <conf-date>August 7, 2002</conf-date>
          <conf-loc>San Jose, CA</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>369</fpage>
          <lpage>376</lpage>
          <pub-id pub-id-type="doi">10.1109/icdm.2001.989541</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nofal</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Bani-Ahmad</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Classification Based on Association-Rule Mining Techniques: a General Survey and Empirical Comparative Evaluation</article-title>
          <source>Ubiquitous Computing and Communication Journal</source>
          <year>2010</year>
          <volume>5</volume>
          <issue>3</issue>
          <fpage>9</fpage>
          <lpage>17</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.ubicc.org/files/pdf/507_507.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xing</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Pei</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Keogh</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>A brief survey on sequence classification</article-title>
          <source>SIGKDD Explor. Newsl</source>
          <year>2010</year>
          <month>11</month>
          <day>09</day>
          <volume>12</volume>
          <issue>1</issue>
          <fpage>40</fpage>
          <lpage>48</lpage>
          <pub-id pub-id-type="doi">10.1145/1882471.1882478</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Keogh</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Time Series Classification under More Realistic Assumptions</article-title>
          <year>2013</year>
          <conf-name>Proceedings of the 2013 SIAM International Conference on Data Mining</conf-name>
          <conf-date>May 2-4, 2013</conf-date>
          <conf-loc>Texas, USA</conf-loc>
          <publisher-loc>Philadelphia, PA</publisher-loc>
          <publisher-name>Society for Industrial and Applied Mathematics</publisher-name>
          <fpage>578</fpage>
          <lpage>586</lpage>
          <pub-id pub-id-type="doi">10.1137/1.9781611972832.64</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Drezewski</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Dziuban</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Hernik</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Paczek</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Comparison of data mining techniques for Money Laundering Detection System</article-title>
          <year>2015</year>
          <conf-name>2015 International Conference on Science in Information Technology (ICSITech)</conf-name>
          <conf-date>October 27-28, 2015</conf-date>
          <conf-loc>Yogyakarta, Indonesia</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>5</fpage>
          <lpage>10</lpage>
          <pub-id pub-id-type="doi">10.1109/icsitech.2015.7407767</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lesh</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Zaki</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ogihara</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Mining features for sequence classification</article-title>
          <source>Proceedings of the Fifth ACM SIGKDD International Conference on Knowledge Discovery and Data Mining - KDD 99</source>
          <year>1999</year>
          <conf-name>KDD99: The First Annual International Conference on Knowledge Discovery in Data</conf-name>
          <conf-date>August 15-18, 1999</conf-date>
          <conf-loc>San Diego, CA</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>ACM</publisher-name>
          <fpage>342</fpage>
          <lpage>346</lpage>
          <pub-id pub-id-type="doi">10.1145/312129.312275</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tseng</surname>
              <given-names>VSM</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>CH</given-names>
            </name>
          </person-group>
          <article-title>CBS: A new classification method by using sequential patterns</article-title>
          <year>2005</year>
          <conf-name>2005 SIAM International Conference on Data Mining (SDM 2005)</conf-name>
          <conf-date>April 21-23, 2005</conf-date>
          <conf-loc>Newport Beach, CA</conf-loc>
          <publisher-name>Society for Industrial and Applied Mathematics</publisher-name>
          <fpage>596</fpage>
          <lpage>600</lpage>
          <pub-id pub-id-type="doi">10.1137/1.9781611972757.68</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jiménez</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Sanchez</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Juarez</surname>
              <given-names>JM</given-names>
            </name>
          </person-group>
          <article-title>Multi-objective evolutionary algorithms for fuzzy classification in survival prediction</article-title>
          <source>Artif Intell Med</source>
          <year>2014</year>
          <month>03</month>
          <volume>60</volume>
          <issue>3</issue>
          <fpage>197</fpage>
          <lpage>219</lpage>
          <pub-id pub-id-type="doi">10.1016/j.artmed.2013.12.006</pub-id>
          <pub-id pub-id-type="medline">24525210</pub-id>
          <pub-id pub-id-type="pii">S0933-3657(13)00166-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Geng</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Hamilton</surname>
              <given-names>HJ</given-names>
            </name>
          </person-group>
          <article-title>Interestingness measures for data mining</article-title>
          <source>ACM Comput. Surv</source>
          <year>2006</year>
          <month>09</month>
          <day>30</day>
          <volume>38</volume>
          <issue>3</issue>
          <fpage>9</fpage>
          <pub-id pub-id-type="doi">10.1145/1132960.1132963</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Fu</surname>
              <given-names>AW-c</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jin</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>McAullay</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Williams</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Sparks</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Kelman</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Mining risk patterns in medical data</article-title>
          <source>KDD '05: Proceedings of the Eleventh ACM SIGKDD International Conference on Knowledge Discovery in Data Mining</source>
          <year>2005</year>
          <conf-name>KDD05: The Eleventh ACM SIGKDD International Conference on Knowledge Discovery and Data Mining</conf-name>
          <conf-date>August 21-24, 2005</conf-date>
          <conf-loc>Chicago, IL</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>ACM</publisher-name>
          <fpage>770</fpage>
          <lpage>775</lpage>
          <pub-id pub-id-type="doi">10.1145/1081870.1081971</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Fu</surname>
              <given-names>AW</given-names>
            </name>
            <name name-style="western">
              <surname>Fahey</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Efficient discovery of risk patterns in medical data</article-title>
          <source>Artif Intell Med</source>
          <year>2009</year>
          <month>01</month>
          <volume>45</volume>
          <issue>1</issue>
          <fpage>77</fpage>
          <lpage>89</lpage>
          <pub-id pub-id-type="doi">10.1016/j.artmed.2008.07.008</pub-id>
          <pub-id pub-id-type="medline">18783927</pub-id>
          <pub-id pub-id-type="pii">S0933-3657(08)00090-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Bohlscheid</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Debt Detection in Social Security by Adaptive Sequence Classification</article-title>
          <source>Lecture Notes in Computer Science. Vol 5914 LNAI</source>
          <year>2009</year>
          <conf-name>Knowledge Science, Engineering and Management. KSEM 2009</conf-name>
          <conf-date>November 25-27, 2009</conf-date>
          <conf-loc>Vienna, Austria</conf-loc>
          <publisher-loc>Berlin, Heidelberg</publisher-loc>
          <publisher-name>Karagiannis D, Jin Z. eds. Springer</publisher-name>
          <fpage>192</fpage>
          <lpage>203</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-642-10488-6_21</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Heierman</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Youngblood</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Cook</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Mining temporal sequences to discover interesting patterns</article-title>
          <year>2004</year>
          <conf-name>Third International Workshop on Mining Temporal and Sequential Data (TDM-04)</conf-name>
          <conf-date>August 22, 2004</conf-date>
          <conf-loc>Seattle, WA</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Petitjean</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Tatti</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Webb</surname>
              <given-names>GI</given-names>
            </name>
          </person-group>
          <article-title>Skopus: Mining top-k sequential patterns under leverage</article-title>
          <source>Data Min Knowl Disc</source>
          <year>2016</year>
          <month>6</month>
          <day>14</day>
          <volume>30</volume>
          <issue>5</issue>
          <fpage>1086</fpage>
          <lpage>1111</lpage>
          <pub-id pub-id-type="doi">10.1007/s10618-016-0467-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Liao</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>A sequence classification model based on pattern coverage rate</article-title>
          <source>Lecture Notes in Computer Science, vol 7861. Springer</source>
          <year>2013</year>
          <conf-name>Grid and Pervasive Computing: GPC 2013</conf-name>
          <conf-date>May 9-11</conf-date>
          <conf-loc>Seoul, Korea</conf-loc>
          <publisher-loc>Berlin, Heidelberg, Germany</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>737</fpage>
          <lpage>745</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-642-38027-3_81</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Toma</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Abu-Hanna</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bosman</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Discovery and integration of univariate patterns from daily individual organ-failure scores for intensive care mortality prediction</article-title>
          <source>Artif Intell Med</source>
          <year>2008</year>
          <month>05</month>
          <volume>43</volume>
          <issue>1</issue>
          <fpage>47</fpage>
          <lpage>60</lpage>
          <pub-id pub-id-type="doi">10.1016/j.artmed.2008.01.002</pub-id>
          <pub-id pub-id-type="medline">18394871</pub-id>
          <pub-id pub-id-type="pii">S0933-3657(08)00015-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Toma</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Bosman</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Siebes</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Peek</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Abu-Hanna</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Learning predictive models that use pattern discovery--a bootstrap evaluative approach applied in organ functioning sequences</article-title>
          <source>J Biomed Inform</source>
          <year>2010</year>
          <month>08</month>
          <volume>43</volume>
          <issue>4</issue>
          <fpage>578</fpage>
          <lpage>86</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1532-0464(10)00037-7"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jbi.2010.03.004</pub-id>
          <pub-id pub-id-type="medline">20332034</pub-id>
          <pub-id pub-id-type="pii">S1532-0464(10)00037-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ghosh</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Multivariate Sequential Contrast Pattern Mining and Prediction Models for Critical Care Clinical Informatics (Thesis)</article-title>
          <source>OPUS</source>
          <year>2017</year>
          <access-date>2022-07-25</access-date>
          <publisher-name>University of Technology Sydney</publisher-name>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://hdl.handle.net/10453/123204">http://hdl.handle.net/10453/123204</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sheppard</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Hemington-Gorse</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Shelley</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Philp</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Dziewulski</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Prognostic scoring systems in burns: a review</article-title>
          <source>Burns</source>
          <year>2011</year>
          <month>12</month>
          <volume>37</volume>
          <issue>8</issue>
          <fpage>1288</fpage>
          <lpage>95</lpage>
          <pub-id pub-id-type="doi">10.1016/j.burns.2011.07.017</pub-id>
          <pub-id pub-id-type="medline">21940104</pub-id>
          <pub-id pub-id-type="pii">S0305-4179(11)00232-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Casanova</surname>
              <given-names>IJ</given-names>
            </name>
            <name name-style="western">
              <surname>Campos</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Juarez</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Fernandez-Fernandez-Arroyo</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lorente</surname>
              <given-names>JA</given-names>
            </name>
          </person-group>
          <article-title>Using Multivariate Sequential Patterns to Improve Survival Prediction in Intensive Care Burn Unit</article-title>
          <source>Lecture Notes in Computer Science, vol 9105</source>
          <year>2015</year>
          <conf-name>AIME 2015: Artificial Intelligence in Medicine</conf-name>
          <conf-date>June 17-20</conf-date>
          <conf-loc>Pavia, Italy</conf-loc>
          <publisher-loc>Cham, Switzerland</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>277</fpage>
          <lpage>286</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-319-19551-3_36</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Allen</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Maintaining Knowledge about Temporal Intervals</article-title>
          <source>Readings in Qualitative Reasoning About Physical Systems</source>
          <year>2013</year>
          <volume>11</volume>
          <issue>26</issue>
          <fpage>361</fpage>
          <lpage>372</lpage>
          <pub-id pub-id-type="doi">10.1016/b978-1-4832-1447-4.50033-x</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gomariz</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Techniques for the Discovery of Temporal Patterns (PhD Thesis)</article-title>
          <source>University of Murcia (Spain), University of Antwerp (Belgium)</source>
          <year>2014</year>
          <access-date>2022-07-25</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://hdl.handle.net/10201/38109">http://hdl.handle.net/10201/38109</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Efficient mining of emerging patterns</article-title>
          <source>Proceedings of the Fifth ACM SIGKDD International Conference on Knowledge Discovery and Data Mining. KDD '99</source>
          <year>1999</year>
          <conf-name>KDD99: The First Annual International Conference on Knowledge Discovery in Data</conf-name>
          <conf-date>August 15-18, 1999</conf-date>
          <conf-loc>San Diego, CA</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>ACM</publisher-name>
          <fpage>43</fpage>
          <lpage>52</lpage>
          <pub-id pub-id-type="doi">10.1145/312129.312191</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Discovering Jumping Emerging Patterns and Experiments on Real Data sets</article-title>
          <year>1999</year>
          <month>7</month>
          <conf-name>9th International Database Conference on Heterogeneous and Internet Databases (IDC)</conf-name>
          <conf-date>July 15-17, 1999</conf-date>
          <conf-loc>Hong Kong</conf-loc>
          <fpage>15</fpage>
          <lpage>17</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://corescholar.libraries.wright.edu/knoesis/402"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Ramamohanarao</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Making Use of the Most Expressive Jumping Emerging Patterns for Classification</article-title>
          <source>Knowledge and Information Systems</source>
          <year>2001</year>
          <month>05</month>
          <volume>3</volume>
          <issue>2</issue>
          <fpage>131</fpage>
          <lpage>145</lpage>
          <pub-id pub-id-type="doi">10.1007/pl00011662</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>CAEP: Classification by aggregating emerging patterns</article-title>
          <source>Lecture Notes in Computer Science. Vol 1721</source>
          <year>1999</year>
          <conf-name>International Conference on Discovery Science (DS 1999)</conf-name>
          <conf-date>December 6-8</conf-date>
          <conf-loc>Tokyo, Japan</conf-loc>
          <publisher-name>Springer Berlin Heidelberg</publisher-name>
          <fpage>30</fpage>
          <lpage>42</lpage>
          <pub-id pub-id-type="doi">10.1007/3-540-46846-3_4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Toivonen</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Satou</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Discovering statistically non-redundant subgroups</article-title>
          <source>Knowledge-Based Systems</source>
          <year>2014</year>
          <month>09</month>
          <volume>67</volume>
          <fpage>315</fpage>
          <lpage>327</lpage>
          <pub-id pub-id-type="doi">10.1016/j.knosys.2014.04.030</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Toti</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Vilalta</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Lindner</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Price</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Effect of the Definition of Non-Exposed Population in Risk Pattern Mining</article-title>
          <year>2016</year>
          <month>01</month>
          <conf-name>5th Workshop on Data Mining for Medicine and Healthcare</conf-name>
          <conf-date>May 7, 2016</conf-date>
          <conf-loc>Miami, FL</conf-loc>
          <fpage>5</fpage>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Toti</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Vilalta</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Lindner</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Lefer</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Macias</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Price</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Analysis of correlation between pediatric asthma exacerbation and exposure to pollutant mixtures with association rule mining</article-title>
          <source>Artif Intell Med</source>
          <year>2016</year>
          <month>11</month>
          <volume>74</volume>
          <fpage>44</fpage>
          <lpage>52</lpage>
          <pub-id pub-id-type="doi">10.1016/j.artmed.2016.11.003</pub-id>
          <pub-id pub-id-type="medline">27964802</pub-id>
          <pub-id pub-id-type="pii">S0933-3657(15)30103-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Casanova</surname>
              <given-names>IJ</given-names>
            </name>
            <name name-style="western">
              <surname>Campos</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Juarez</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Fernandez-Fernandez-Arroyo</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lorente</surname>
              <given-names>JA</given-names>
            </name>
          </person-group>
          <article-title>Impact of time series discretization on intensive care burn unit survival classification</article-title>
          <source>Prog Artif Intell</source>
          <year>2017</year>
          <month>6</month>
          <day>8</day>
          <volume>7</volume>
          <issue>1</issue>
          <fpage>41</fpage>
          <lpage>53</lpage>
          <pub-id pub-id-type="doi">10.1007/s13748-017-0130-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Daud</surname>
              <given-names>NR</given-names>
            </name>
            <name name-style="western">
              <surname>Corne</surname>
              <given-names>DW</given-names>
            </name>
          </person-group>
          <article-title>Human readable rule induction in medical data mining</article-title>
          <source>Lecture Notes in Electrical Engineering. Vol 27 LNEE</source>
          <year>2009</year>
          <conf-name>Proceedings of the European Computing Conference</conf-name>
          <conf-date>June 26 - 28, 2009</conf-date>
          <conf-loc>Tbilisi, Georgia</conf-loc>
          <publisher-loc>Boston, MA</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>787</fpage>
          <lpage>798</lpage>
          <pub-id pub-id-type="doi">10.1007/978-0-387-84814-3_79</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mohamed</surname>
              <given-names>WNHW</given-names>
            </name>
            <name name-style="western">
              <surname>Salleh</surname>
              <given-names>MNM</given-names>
            </name>
            <name name-style="western">
              <surname>Omar</surname>
              <given-names>AH</given-names>
            </name>
          </person-group>
          <article-title>A comparative study of Reduced Error Pruning method in decision tree algorithms</article-title>
          <year>2012</year>
          <conf-name>2012 IEEE International Conference on Control System Computing and Engineering, ICCSCE 2012</conf-name>
          <conf-date>23 – 25 November 2012</conf-date>
          <conf-loc>Penang, Malaysia</conf-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>392</fpage>
          <lpage>397</lpage>
          <pub-id pub-id-type="doi">10.1109/iccsce.2012.6487177</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Gu</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>Discriminative pattern mining and its applications in bioinformatics</article-title>
          <source>Brief Bioinform</source>
          <year>2015</year>
          <month>09</month>
          <day>28</day>
          <volume>16</volume>
          <issue>5</issue>
          <fpage>884</fpage>
          <lpage>900</lpage>
          <pub-id pub-id-type="doi">10.1093/bib/bbu042</pub-id>
          <pub-id pub-id-type="medline">25433466</pub-id>
          <pub-id pub-id-type="pii">bbu042</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
