<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMI</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Med Inform</journal-id>
      <journal-title>JMIR Medical Informatics</journal-title>
      <issn pub-type="epub">2291-9694</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
    <article-id pub-id-type="publisher-id">v4i3e23</article-id>
    <article-id pub-id-type="pmid">27370070</article-id>
    <article-id pub-id-type="doi">10.2196/medinform.5660</article-id>
    <article-categories>
      <subj-group subj-group-type="heading">
        <subject>Original Paper</subject>
      </subj-group>
      <subj-group subj-group-type="article-type">
        <subject>Original Paper</subject>
      </subj-group>
    </article-categories>
    <title-group>
      <article-title>Evaluation of an Expert System for the Generation of Speech and Language Therapy Plans</article-title>
    </title-group>
    <contrib-group>
      <contrib contrib-type="editor">
        <name>
          <surname>Eysenbach</surname>
          <given-names>Gunther</given-names>
        </name>
      </contrib>
    </contrib-group>
    <contrib-group>
      <contrib contrib-type="reviewer">
        <name>
          <surname>Wang</surname>
          <given-names>William Yang</given-names>
        </name>
      </contrib>
      <contrib contrib-type="reviewer">
        <name>
          <surname>Tobolcea</surname>
          <given-names>I</given-names>
        </name>
      </contrib>
    </contrib-group>
    <contrib-group>
      <contrib contrib-type="author" id="contrib1" corresp="yes">
      <name name-style="western">
        <surname>Robles-Bykbaev</surname>
        <given-names>Vladimir</given-names>
      </name>
      <degrees>MSc, Eng</degrees>
      <xref rid="aff1" ref-type="aff">1</xref>
      <address>
        <institution>Grupo de Investigación en Inteligencia Artificial y Tecnologías de Asistencia</institution>
        <institution>Universidad Politécnica Salesiana</institution>
        <addr-line>Calle Vieja, 12-30</addr-line>
        <addr-line>Elia Liut</addr-line>
        <addr-line>Cuenca, 010102</addr-line>
        <country>Ecuador</country>
        <phone>593 7862213 ext 1278</phone>
        <fax>593 7862213</fax>
        <email>vrobles@ups.edu.ec</email>
      </address>  
      <xref rid="aff2" ref-type="aff">2</xref>
      <ext-link ext-link-type="orcid">http://orcid.org/0000-0002-7645-8793</ext-link></contrib>
      <contrib contrib-type="author" id="contrib2">
        <name name-style="western">
          <surname>López-Nores</surname>
          <given-names>Martín</given-names>
        </name>
        <degrees>PhD</degrees>
        <xref rid="aff2" ref-type="aff">2</xref>
        <ext-link ext-link-type="orcid">http://orcid.org/0000-0002-4802-607X</ext-link>
      </contrib>
      <contrib contrib-type="author" id="contrib3">
        <name name-style="western">
          <surname>García-Duque</surname>
          <given-names>Jorge</given-names>
        </name>
        <degrees>PhD</degrees>
        <xref rid="aff2" ref-type="aff">2</xref>
        <ext-link ext-link-type="orcid">http://orcid.org/0000-0001-7239-5863</ext-link>
      </contrib>
      <contrib contrib-type="author" id="contrib4">
        <name name-style="western">
          <surname>Pazos-Arias</surname>
          <given-names>José J</given-names>
        </name>
        <degrees>PhD</degrees>
        <xref rid="aff2" ref-type="aff">2</xref>
        <ext-link ext-link-type="orcid">http://orcid.org/0000-0002-0424-5481</ext-link>
      </contrib>
      <contrib contrib-type="author" id="contrib5">
        <name name-style="western">
          <surname>Arévalo-Lucero</surname>
          <given-names>Daysi</given-names>
        </name>
        <degrees>Eng</degrees>
        <xref rid="aff1" ref-type="aff">1</xref>
        <ext-link ext-link-type="orcid">http://orcid.org/0000-0001-7512-4233</ext-link>
      </contrib>
    </contrib-group>
    <aff id="aff1">
    <sup>1</sup>
    <institution>Grupo de Investigación en Inteligencia Artificial y Tecnologías de Asistencia</institution>
    <institution>Universidad Politécnica Salesiana</institution>  
    <addr-line>Cuenca</addr-line>
    <country>Ecuador</country></aff>
    <aff id="aff2">
    <sup>2</sup>
    <institution>AtlantTIC Research Center for Information and Communication Technologies</institution>
    <institution>Department of Telematics Engineering</institution>  
    <institution>University of Vigo</institution>  
    <addr-line>Vigo</addr-line>
    <country>Spain</country></aff>
    <author-notes>
      <corresp>Corresponding Author: Vladimir Robles-Bykbaev 
      <email>vrobles@ups.edu.ec</email></corresp>
    </author-notes>
    <pub-date pub-type="collection"><season>Jul-Sep</season><year>2016</year></pub-date>
    <pub-date pub-type="epub">
      <day>01</day>
      <month>07</month>
      <year>2016</year>
    </pub-date>
    <volume>4</volume>
    <issue>3</issue>
    <elocation-id>e23</elocation-id>
    <!--history from ojs - api-xml-->
    <history>
      <date date-type="received">
        <day>18</day>
        <month>2</month>
        <year>2016</year>
      </date>
      <date date-type="rev-request">
        <day>31</day>
        <month>3</month>
        <year>2016</year>
      </date>
      <date date-type="rev-recd">
        <day>10</day>
        <month>5</month>
        <year>2016</year>
      </date>
      <date date-type="accepted">
        <day>11</day>
        <month>6</month>
        <year>2016</year>
      </date>
    </history>
    <!--(c) the authors - correct author names and publication date here if necessary. Date in form ', dd.mm.yyyy' after jmir.org-->
    <copyright-statement>©Vladimir Robles-Bykbaev, Martín López-Nores, Jorge García-Duque, José J Pazos-Arias, Daysi Arévalo-Lucero. Originally published in JMIR Medical Informatics (http://medinform.jmir.org), 01.07.2016.</copyright-statement>
    <copyright-year>2016</copyright-year>
    <license license-type="open-access" xlink:href="http://creativecommons.org/licenses/by/2.0/">
      <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (http://creativecommons.org/licenses/by/2.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Informatics, is properly cited. The complete bibliographic information, a link to the original publication on http://medinform.jmir.org/, as well as this copyright and license information must be included.</p>
    </license>  
    <self-uri xlink:href="http://medinform.jmir.org/2016/3/e23/" xlink:type="simple"/>
    <abstract>
      <sec sec-type="background">
        <title>Background</title>
        <p>Speech and language pathologists (SLPs) deal with a wide spectrum of disorders, arising from many different conditions, that affect voice, speech, language, and swallowing capabilities in different ways. Therefore, the outcomes of Speech and Language Therapy (SLT) are highly dependent on the accurate, consistent, and complete design of personalized therapy plans. However, SLPs often have very limited time to work with their patients and to browse the large (and growing) catalogue of activities and specific exercises that can be put into therapy plans. As a consequence, many plans are suboptimal and fail to address the specific needs of each patient.</p>
      </sec>
      <sec sec-type="objective">
        <title>Objective</title>
        <p>We aimed to evaluate an expert system that automatically generates plans for speech and language therapy, containing semiannual activities in the five areas of hearing, oral structure and function, linguistic formulation, expressive language and articulation, and receptive language. The goal was to assess whether the expert system speeds up the SLPs’ work and leads to more accurate, consistent, and complete therapy plans for their patients.</p>
      </sec>
      <sec sec-type="methods">
        <title>Methods</title>
        <p>We examined the evaluation results of the SPELTA expert system in supporting the decision making of 4 SLPs treating children in three special education institutions in Ecuador. The expert system was first trained with data from 117 cases, including medical data; diagnosis for voice, speech, language and swallowing capabilities; and therapy plans created manually by the SLPs. It was then used to automatically generate new therapy plans for 13 new patients. The SLPs were finally asked to evaluate the accuracy, consistency, and completeness of those plans. A four-fold cross-validation experiment was also run on the original corpus of 117 cases in order to assess the significance of the results.</p>
      </sec>
      <sec sec-type="results">
        <title>Results</title>
        <p>The evaluation showed that 87% of the outputs provided by the SPELTA expert system were considered valid therapy plans for the different areas. The SLPs rated the overall accuracy, consistency, and completeness of the proposed activities with 4.65, 4.6, and 4.6 points (to a maximum of 5), respectively. The ratings for the subplans generated for the areas of hearing, oral structure and function, and linguistic formulation were nearly perfect, whereas the subplans for expressive language and articulation and for receptive language failed to deal properly with some of the subject cases. Overall, the SLPs indicated that over 90% of the subplans generated automatically were “better than” or “as good as” what the SLPs would have created manually if given the average time they can devote to the task. The cross-validation experiment yielded very similar results.</p>
      </sec>
      <sec sec-type="conclusions">
        <title>Conclusions</title>
        <p>The results show that the SPELTA expert system provides valuable input for SLPs to design proper therapy plans for their patients, in a shorter time and considering a larger set of activities than proceeding manually. The algorithms worked well even in the presence of a sparse corpus, and the evidence suggests that the system will become more reliable as it is trained with more subjects.</p>
      </sec>
    </abstract>
    <kwd-group>
      <kwd>speech-language pathology</kwd>
      <kwd>rehabilitation of speech and language disorders</kwd>
      <kwd>decision support systems, clinical</kwd>
      <kwd>expert systems</kwd>
    </kwd-group></article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Developing and maintaining proper communication skills is a mainstay for every individual to express needs, to learn, to be related with the environment and, in general, to have the opportunity to participate as an active member of society. According to the World Health Organization, individuals with communication difficulties are at a significant social disadvantage in both developing and developed countries [<xref ref-type="bibr" rid="ref1">1</xref>]. This disadvantage often affects a person’s emotional and social life and can compromise educational and job opportunities, particularly in sectors where effective communication is critical, such as health care, education, local government, and justice.</p>
      <p>Speech and language therapy (SLT) is an area of health care focused on the evaluation and treatment of a broad range of disorders, which can be roughly classified as affecting voice, speech, language, or swallowing capabilities. Disorders like selective mutism, dysarthria, aphasia, and dysphagia have a substantial impact on quality of life and human potential, whether they affect children who stutter as they struggle to speak up in class, lawyers or teachers with adult-onset voice disorders, or post-stroke individuals laboring to communicate verbally. Numerous studies about the incidence and prevalence of communication disorders in developed countries depict similar realities for Europe, Canada, Australia, and the United States [<xref ref-type="bibr" rid="ref2">2</xref>-<xref ref-type="bibr" rid="ref5">5</xref>]. Based on figures like the 7.5 million people in the United States who have voice disorders, the 3 million who stutter, and the 6-8 million who have been diagnosed with some form of language impairment, the American Speech-Language-Hearing Association estimates that 40 million Americans are affected by communication disorders, costing the nation US $154-184 billion annually [<xref ref-type="bibr" rid="ref6">6</xref>]. It is estimated that more than 60 million people in the European Union are affected, with an estimated cost of €220-260 billion. As the population ages and survival odds improve for fragile infants and individuals who have sustained injury or acquired disease, the number of people with communication disorders will likely continue to increase [<xref ref-type="bibr" rid="ref7">7</xref>].</p>
      <p>Notwithstanding the societal and economic impact of communication disorders, SLT remains a largely overlooked area of health care. The latest World Report on Disability highlights that many countries suffer from lack of professionals, services, and structures to provide effective assessment, diagnosis, counseling, intervention, and treatment for people suffering from communication disorders [<xref ref-type="bibr" rid="ref8">8</xref>]. In such conditions, speech and language pathologists (SLPs) have very limited time to work with their patients. This may mean that the diagnosis may fail to accurately identify the causes of the disorders, that the designed therapy plans may be suboptimal (eg, because the SLPs fail to keep in mind the whole set of activities they could apply), or that the treatment may be insufficient or not properly applied [<xref ref-type="bibr" rid="ref7">7</xref>]. In this respect, Turnbull et al [<xref ref-type="bibr" rid="ref9">9</xref>] found that only 19.2% of young people (from birth to 21 years old) who have communication disorders are actually receiving some form of specific care. Mackenzie et al [<xref ref-type="bibr" rid="ref10">10</xref>] surveyed SLT provision for people with aphasia in the United Kingdom and found many areas reported low staffing levels and were thus unable to provide the recommended care or a comprehensive service. Code and Heron [<xref ref-type="bibr" rid="ref11">11</xref>] also concluded that people with aphasia receive significantly less therapy than national recommendations suggest.</p>
      <p>Over the last decade, many research efforts have separately shown evidence that the application of information and communication technology (ICT) has great potential to improve the quality and efficiency of SLT practice, as well as health outcomes and patients’ quality of life. There have been several approaches to automate diagnostic tests by means of audiovisual signal processing [<xref ref-type="bibr" rid="ref12">12</xref>-<xref ref-type="bibr" rid="ref15">15</xref>] and to automate the generation of therapy plans for specific disorders [<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref17">17</xref>]. In this paper, we evaluate the support provided to SLPs by the SPELTA (SPEech and Language Therapy Assistant) expert system presented by Robles-Bykbaev et al [<xref ref-type="bibr" rid="ref18">18</xref>], which aims to automatically generate therapy plans for SLT, containing semiannual activities and daily exercises for an unrestricted range of disorders affecting the five areas of hearing, oral structure and function, linguistic formulation, expressive language and articulation, and receptive language. The goal is to assess whether the expert system can speed up the SLPs’ work and lead to more accurate, consistent, and complete therapy plans for their patients.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>SPELTA Expert System</title>
        <p>The SPELTA expert system is one part of a set of ICT tools developed by Universidad Politécnica Salesiana (Ecuador) and Universidade de Vigo (Spain) to support SLT within an integrative environment for clinicians and students, pathologists, patients, relatives, and other potential users [<xref ref-type="bibr" rid="ref19">19</xref>]. The environment is based on a formal knowledge model of the SLT domain and leans on OpenEHR solutions to support the storage and exchange of health-related data. As depicted in <xref ref-type="fig" rid="figure1">Figure 1</xref>, the SPELTA system is involved with the automatic generation of therapy plans for new subjects, based on two sources of information: (1) domain ontologies that interrelate the activities and the exercises with specific diseases, speech-language disorders, and skills, and (2) the corpus of patient profiles, containing the compendium of data, plans, and evaluations of previous patients.</p>
        <p>Specifically, the profile of an SLT patient contains the following data:</p>
        <list list-type="bullet">
          <list-item>
            <p>Personal data, including chronological age, gender, name, etc.</p>
          </list-item>
          <list-item>
            <p>A medical record specifying diagnosis, general medical conditions and related disorders (eg, cerebral palsy, hemiparesis, athetosis), as indicated by doctors.</p>
          </list-item>
          <list-item>
            <p>A record of cognitive development data, indicating cognitive age, gap in language development, expressive language age, and receptive language age (as estimated by SLPs).</p>
          </list-item>
          <list-item>
            <p>An SLT evaluation that looks at 102 parameters from the five SL areas:</p>
          </list-item>
        </list>
        <p>1. Hearing—subjective evaluation of the auditory condition: reflex, localization of sound sources, and response to voice.</p>
        <p>2. Oral structure &#38; function—tongue, teeth, palate, lips, and maxillary mobility.</p>
        <p>3. Linguistic formulation—phonation and breathing condition.</p>
        <p>4. Expressive language and articulation—vocal development, social communication, semantics (content)-vocabulary and concepts, structure (form)-morphology and syntax, and integrative thinking skills; pronunciation of phonemes, sentences, polysyllabic words, and vowel phonemes.</p>
        <p>5. Receptive language—attention, semantics (content)-vocabulary and concepts, structure (form)-morphology and syntax, and integration skills.</p>
        <list list-type="bullet">
          <list-item>
            <p>A therapy plan, containing five subplans with lists of semiannual activities and daily exercises for each one of the SL areas. One example of activity could be “perform blow exercises to increase the blowing force.” Two specific exercises related to this activity could be “blow confetti 10 times during 2 seconds” or “inflate one balloon in no more than 6 exhalations.”</p>
          </list-item>
          <list-item>
            <p>Control evaluations with the results of successive therapy sessions.</p>
          </list-item>
        </list>
        <p>Internally, the SPELTA system relies on an implementation of the Partition Around Medoids algorithm to generate clusters of patient profiles with two levels of granularity [<xref ref-type="bibr" rid="ref20">20</xref>]. The generation of a new therapy plan is dealt with as a classification problem, looking for the most similar cases in each one of the five SL areas according to the K-Nearest Neighbors criterion [<xref ref-type="bibr" rid="ref21">21</xref>]. First-level clusters represent groups of patients who may have similar speech-language skills and limitations, but possibly arising from (or linked to) different medical conditions. To create these groups, we use the distance metrics of <xref ref-type="fig" rid="figure2">Figure 2</xref>, where <italic>S</italic><sub>i</sub> and <italic>S</italic><sub>j</sub> refer to two different subjects, <italic>A</italic> is one of the SL areas, <italic>f</italic> goes over the set of features from the medical records relevant for that area (<italic>features</italic><sub>MR</sub><italic>(A)</italic>), and ManhDist denotes the mean-Manhattan binary distance [<xref ref-type="bibr" rid="ref18">18</xref>].</p>
        <p>In the second level, the subjects are clustered according to the fine-grained evaluation of the record of cognitive development data and the initial SLT evaluation. For example, within a first-level cluster that includes the cases of children with Down syndrome and phonological disorders, we need to differentiate subjects who commit additions (ie, adding extra sounds in some words, eg, “balue” for “blue”) from subjects who commit substitutions (ie, one or more sounds are substituted for others, eg, “bagon” for “wagon”). In this case, we use the distance metrics of <xref ref-type="fig" rid="figure3">Figure 3</xref>. The first summation measures the mean-Manhattan binary distance of the initial SLT evaluations of two subjects, considering only the dimensions relevant to the speech-language area in question, <italic>dimensions</italic><sub>IE</sub><italic>(A)</italic>. The second summation provides a scale factor derived from the absolute differences of cognitive age, gap in language development, expressive language age, and receptive language age (the features of cognitive development data) [<xref ref-type="bibr" rid="ref18">18</xref>].</p>
        <p><xref ref-type="fig" rid="figure4">Figure 4</xref> depicts an example of the cluster structure generated by SPELTA for each of the SL areas we consider. Each one of the first-level and second-level clusters has one of the subject cases designated as a medoid, rather than a fictitious case computed by averaging. This facilitates the classification of new cases, identifying the closest subjects in each one of the SL areas.</p>
        <p>The plans provided by the SPELTA system are presented to SLPs through visual interfaces, so that they can validate it as a whole or modify certain parts, as they deem necessary. To facilitate the task, the interfaces show which cases were found to be closest in each one of the SL areas. If several subjects were found to be equally distant to the new one in some of the areas, then it is possible to browse the superset of activities, the intersections, and the disjunctions. As an example, <xref ref-type="table" rid="table1">Table 1</xref> shows the activities of one master plan generated by the SPELTA system, with the third column indicating the most similar subjects in each area and the features that make them similar to the new case. The profile description is as follows: age 15 years, 8 months; medical diagnosis of athetoid cerebral palsy (ICD-10-CM code G80.3); speech and language diagnosis of mixed receptive-expressive language disorder (ICD-10-CM code F80.1); receptive language age of 4 years; expressive language age of 2 years, 8 months; and a language developmental age of 3 years, 4 months.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>The activities of a sample therapy plan provided by the SPELTA system (Case 52).</p>
          </caption>
          <table width="665" cellpadding="7" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="124"/>
            <col width="232"/>
            <col width="335"/>
            <thead>
              <tr valign="top">
                <td>Area</td>
                <td>Activities</td>
                <td>Source subplans</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td rowspan="3">Hearing</td>
                <td>Perform sound identification exercises.</td>
                <td rowspan="3">Case 37: a patient with a similar receptive language age (4 years, 6 months) and a 100% coincidence in the evaluation of hearing (cochleo-palpebral reflex, startle response, turns head to sound source, identifying sound objects, sound source localization without visual stimulus).</td>
              </tr>
              <tr valign="top">
                <td>Discriminate sounds of nature, body, and animals.</td>
              </tr>
              <tr valign="top">
                <td>Perform phoneme discrimination exercises.</td>
              </tr>
              <tr valign="top">
                <td rowspan="5">Oral structure &#38; function</td>
                <td>Perform segmental relaxation massages.</td>
                <td rowspan="5">Case 18: a patient with an 84% coincidence in the oral peripheral mechanism (same tongue size, same speed in tongue movements, present tongue protrusion, voluntary and involuntary swallowing are present, is able to chew hard and soft food, sialorrhea is not present).</td>
              </tr>
              <tr valign="top">
                <td>Perform slow and fast tongue movements.</td>
              </tr>
              <tr valign="top">
                <td>Perform exercises with lips (retraction and protrusion).</td>
              </tr>
              <tr valign="top">
                <td>Achieve sound productions using the oropharynx structure.</td>
              </tr>
              <tr valign="top">
                <td>Perform active and passive exercises using tongue, lips, and jaw.</td>
              </tr>
              <tr valign="top">
                <td rowspan="2">Linguistic formulation</td>
                <td>Work in the automatic respiration process (inspirations and expirations), and work with blow exercises to increase the blowing force.</td>
                <td>Case 22: a patient with a 70% coincidence in linguistic formulation (same respiratory frequency, same thorax symmetry, diaphragmatic breathing).</td>
              </tr>
              <tr valign="top">
                <td>Respiration exercises associated with vowels and simple phonemes (/pa/, /da/, /fo/).</td>
                <td>Case 3: a patient with a 70% coincidence in linguistic formulation (diaphragmatic breathing, no nasal obstruction, same exhalation period).</td>
              </tr>
              <tr valign="top">
                <td rowspan="6">Expressive language &#38; articulation</td>
                <td>Construct sentences from a given word.</td>
                <td rowspan="6">Case 22: a patient with a similar expressive language age (1 year, 7 months), similar diagnosis for the medical examination (cerebral palsy and mixed receptive-expressive language disorder) and a 100% coincidence in the speech-language evaluation.</td>
              </tr>
              <tr valign="top">
                <td>Sort out the words of a sentence.</td>
              </tr>
              <tr valign="top">
                <td>Work in grammatical structure.</td>
              </tr>
              <tr valign="top">
                <td>Develop spontaneous conversation.</td>
              </tr>
              <tr valign="top">
                <td>Perform activities that use twisters and rhymes.</td>
              </tr>
              <tr valign="top">
                <td>Work with the personal articulation exercise book.</td>
              </tr>
              <tr valign="top">
                <td rowspan="6">Receptive language</td>
                <td>Work with sequences and puzzles of 4 elements.</td>
                <td rowspan="6">Case 37: a patient with a similar receptive language age (4 years, 6 months), similar diagnosis for the medical examination (cerebral palsy and mixed receptive-expressive language disorder) and a 90% coincidence in the speech-language evaluation (the only difference relates to the use of place prepositions like “under,” “over,” etc).</td>
              </tr>
              <tr valign="top">
                <td>Learn semantic categories.</td>
              </tr>
              <tr valign="top">
                <td>Identify objects according to their utility.</td>
              </tr>
              <tr valign="top">
                <td>Identify daily activities.</td>
              </tr>
              <tr valign="top">
                <td>Learn temporal notions (day and night, before and after).</td>
              </tr>
              <tr valign="top">
                <td>Identify similar/distinct objects according to their utility.</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>A block diagram of the SPELTA system.</p>
          </caption>
          <graphic xlink:href="medinform_v4i3e23_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Metric used to determine the distance between two subjects in a specific SL area, according to their profile.</p>
          </caption>
          <graphic xlink:href="medinform_v4i3e23_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Metric used to determine the distance between two subjects within a specific first-level cluster.</p>
          </caption>
          <graphic xlink:href="medinform_v4i3e23_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>The clustering approach of the SPELTA system. This structure is used for each speech-language area.</p>
          </caption>
          <graphic xlink:href="medinform_v4i3e23_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Study Participants and Data Preparation</title>
        <p>For the study presented in this paper, the SPELTA expert system was deployed, along with the accompanying tools, in three special education institutions for children in Ecuador: Instituto de Parálisis Cerebral del Azuay (Institute of Cerebral Palsy of Azuay), Fundación “General Dávalos” (“General Dávalos” Foundation), and CEDEI School. Over the course of 2 years (from September 2012 to September 2014), a team of 4 SLPs progressively created a corpus of 117 children profiles, including the corresponding number of therapy plans created manually by themselves and subsequent control evaluations. Some relevant data from the corpus are included in <xref ref-type="app" rid="app1">Multimedia Appendix 1</xref>. The most common conditions were those of cerebral palsy with/without accompanying dysarthria, dyslalia, epilepsy or dysphasia (n=22), Down syndrome with/without dysarthria or dysphasia (n=19), intellectual disability with/without dysarthria or dysphasia (n=10), autistic disorders (n=9), and fetal alcohol syndrome (n=5). These are the disorders with greatest prevalence in the Ecuadorian province of Azuay.</p>
        <p>The corpus is admittedly small and sparse, implying that certain conditions may occur only a few times and many combinations are not included. However, that sparsity is a representative feature of the SLT area because the range of disabilities and communication disorders is so broad that even if two cases have the same medical diagnosis and similar patient profiles, they can require largely different therapy strategies or the support of different assistive technologies. The SPELTA expert system was precisely designed bearing this problem in mind.</p>
        <p>The collaborating SLPs used the interfaces and services provided by SPELTA to perform an initial screening of each patient, followed by a personalized evaluation of the 102 SL parameters, and finally, the manual design of a proper therapy plan. As shown in <xref ref-type="fig" rid="figure5">Figure 5</xref>, the tools were available on mobile devices as well as desktop computers (see <xref ref-type="fig" rid="figure6">Figures 6</xref>-<xref ref-type="fig" rid="figure8">8</xref>). The patients could use smartphones or tablets to engage in interactive exercises to evaluate some speech-language skills or to receive memory, motor, hearing, and visual stimulation. The mobile apps proved very useful for SLPs to annotate data about patients who suffer from disabilities that affect their motor skills (eg, cerebral palsy, hemiparesis, hemiplegia) because they allow working in a comfortable space for the patient at work or home. In turn, the desktop apps were most useful with patients in a consulting room or in the rehabilitation centers, and to provide remote assistance.</p>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>Diagram of the interfaces and services provided by the SPELTA system.</p>
          </caption>
          <graphic xlink:href="medinform_v4i3e23_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure6" position="float">
          <label>Figure 6</label>
          <caption>
            <p>Screen capture of the hearing test that can be applied with mobile devices.</p>
          </caption>
          <graphic xlink:href="medinform_v4i3e23_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure7" position="float">
          <label>Figure 7</label>
          <caption>
            <p>Webpage showing the results of patients' skills in the five SLT areas.</p>
          </caption>
          <graphic xlink:href="medinform_v4i3e23_fig7.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure8" position="float">
          <label>Figure 8</label>
          <caption>
            <p>Screen capture of the articulation test on a desktop application.</p>
          </caption>
          <graphic xlink:href="medinform_v4i3e23_fig8.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Evaluation Method</title>
        <p>Having trained its algorithms on the corpus of 117 cases and the corresponding plans, the first stage of the evaluation of the SPELTA expert system involved the generation of therapy plans for the cases of 13 new children (see <xref ref-type="app" rid="app2">Multimedia Appendix 2</xref>). The SLPs discussed whether each one of the automatically generated plans was convenient or not, considering the following criteria.</p>
        <sec>
          <title>Accuracy</title>
          <p>The exercises and activities selected by SPELTA must be adequate to support the development and rehabilitation of one or more skills related to speech and language. For example, if a patient needs to improve speech production, it is necessary that they have proper breathing conditions and adequate control of their lips and tongue. The accuracy criterion refers to whether the exercises and activities within a plan match the skills that should be improved in the patient.</p>
        </sec>
        <sec>
          <title>Consistency</title>
          <p>Each patient’s profile has different characteristics, such as medical diagnosis, developmental language age, chronological age, etc. The consistency criterion is used to analyze whether a plan contains exercises and activities that can be carried out in a proper way with each patient, bearing in mind their capacity to understand the requests, the affected skills, the developmental gap, etc. For example, cases 23 and 32 (see <xref ref-type="app" rid="app1">Multimedia Appendix 1</xref>) represent two patients suffering from Down syndrome who had similar developmental language ages (a difference of only 1 month). However, case 23 presented a developmental gap of 2 years and 1 month, whereas case 32 had a 5-year gap. The consistency criterion provides for dealing with these two cases with different activities and exercises, even though the profiles are similar in terms of medical diagnosis and developmental age.</p>
        </sec>
        <sec>
          <title>Completeness</title>
          <p>In order to have an effective rehabilitation plan, it is necessary to have an adequate number of exercises and activities (not too many or too few). In this line, the completeness criterion is used to determine whether the number and complexity of exercises is adequate for a specific patient. For example, the plan in <xref ref-type="table" rid="table1">Table 1</xref> (generated by the SPELTA system) contains the following activities for the hearing area: perform sound identification exercises, discriminate sounds of nature, the body, and animals, and perform phoneme discrimination exercises. The collaborating SLPs confirmed that those guidelines are appropriate to help develop the skills that allow patients to identify phonemes, to construct words and short sentences, and to develop auditory memory over a period of 6 months. Similarly, the number of knowledge areas related to communication is properly delimited for a patient who has a receptive language age of 4 years.</p>
          <p>As shown in <xref ref-type="fig" rid="figure9">Figure 9</xref>, these criteria were assessed separately for the five subplans of each new plan generated by the SPELTA system, that is, looking at the activities and exercises assigned to each of the five SLT areas. The collaborating SLPs would rate accuracy, consistency, and completeness of each subplan on a 5-point Likert scale, and only the ones that achieved average scores &gt;4 were considered valid and were to be used during the therapy process. Additionally, each SLP would provide a binary response to whether each subplan was “better than” or “as good as” the subplan they would have created manually if given the average time that they could devote to the task.</p>
          <p>In order to get further evidence about the statistical significance of the results, we conducted the experiment to evaluate the SPELTA expert system using a 4-fold cross-validation approach. Specifically, we partitioned the original corpus into 4 sets of 29, 29, 29, and 30 cases, and each cross-validation round consisted of asking the system to provide therapy plans for the cases of each subset, after training it with the cases of the other 3 subsets. The SLPs would discuss whether each one of the automatically generated plans was convenient or not, as above.</p>
          <fig id="figure9" position="float">
            <label>Figure 9</label>
            <caption>
              <p>The evaluation process followed to assess the plans provided by the SPELTA expert system.</p>
            </caption>
            <graphic xlink:href="medinform_v4i3e23_fig9.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Generation of Therapy Plans for New Cases</title>
        <p><xref ref-type="fig" rid="figure10">Figures 10</xref>-<xref ref-type="fig" rid="figure14">14</xref> show the average values obtained on the Likert scale for each of the subplans provided by the SPELTA system when given the input of the 13 new cases: <xref ref-type="fig" rid="figure10">Figure 10</xref> shows the results in the SLT area of hearing, <xref ref-type="fig" rid="figure11">Figure 11</xref> shows oral structure and function, <xref ref-type="fig" rid="figure12">Figure 12</xref> shows linguistic formulation, <xref ref-type="fig" rid="figure13">Figure 13</xref> shows expressive language and articulation, and <xref ref-type="fig" rid="figure14">Figure 14</xref> shows receptive language. The three criteria (accuracy, consistency, and completeness) are represented with different line colors. We can make the following observations per area.</p>
        <fig id="figure10" position="float">
          <label>Figure 10</label>
          <caption>
            <p>Results achieved by the expert system in the area of hearing.</p>
          </caption>
          <graphic xlink:href="medinform_v4i3e23_fig10.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure11" position="float">
          <label>Figure 11</label>
          <caption>
            <p>Results achieved by the expert system in the area of oral structure and function.</p>
          </caption>
          <graphic xlink:href="medinform_v4i3e23_fig11.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure12" position="float">
          <label>Figure 12</label>
          <caption>
            <p>Results achieved by the expert system in the area of linguistic formulation.</p>
          </caption>
          <graphic xlink:href="medinform_v4i3e23_fig12.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure13" position="float">
          <label>Figure 13</label>
          <caption>
            <p>Results achieved by the expert system in the area of expressive language and articulation.</p>
          </caption>
          <graphic xlink:href="medinform_v4i3e23_fig13.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure14" position="float">
          <label>Figure 14</label>
          <caption>
            <p>Results achieved by the expert system in the area of receptive language.</p>
          </caption>
          <graphic xlink:href="medinform_v4i3e23_fig14.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <sec>
          <title>Hearing</title>
          <p>The 13 subplans generated for this area were considered usable by the SLPs according to the Likert scale. Indeed, only the subplans assembled for new cases 1 and 4 obtained scores of 4 in some of the criteria; all other ratings were 5. For case 1 (a patient with Down syndrome), the SLPs found that it was possible to make some small improvements in the consistency and completeness of the subplan, for which they added one activity to reinforce auditory memory through exercises related to the execution/understanding of simple orders. For case 4 (a patient with mild intellectual disability), in turn, the SLPs determined that the subplan provided by SPELTA was complete for the selected activities, but these did not fully address all the necessary skills in a fully consistent manner for the patient. They changed two activities for less complex ones and added one activity to stimulate the localization of sound sources.</p>
        </sec>
        <sec>
          <title>Oral Structure and Function</title>
          <p>Again, the 13 subplans generated for this area were considered usable, and only the ones generated for new cases 1 and 3 obtained lower than perfect ratings. Regarding case 1, the SLPs considered the subplan largely usable and were looking at fine-grained details due to their abundant experience in treating Down syndrome. For case 3 (a patient with spastic cerebral palsy and dysphasia), the subplan was found to be fully consistent with the patient’s needs, but some of the selected activities were not the best for the case, and the routines missed some exercises the SLPs deemed important. Driven by the most similar cases available in the training corpus, the SPELTA system selected a few exercises that were more suitable for someone with a slightly greater developmental age (around 4 years).</p>
        </sec>
        <sec>
          <title>Linguistic Formulation</title>
          <p>In this area, all subplans provided by SPELTA were considered usable, and only the one designed for new case 2 (a patient with spastic hemiparesis and dysphasia) got scores of 4 for accuracy and completeness. The SLPs found it necessary to include exercises to complement oral motor rehabilitation and to develop some mainstays (eg, lips control, tongue control) that would provide support in more complex processes (eg, getting correct positioning of the phono-articulatory organs for speech production).</p>
        </sec>
        <sec>
          <title>Expressive Language and Articulation</title>
          <p>This is the area where the expert system showed poorest performance, since it failed to generate usable subplans for new cases 8, 10, 11, and 13. The SLPs found that some of the selected activities would not serve to train the affected skills (inaccuracy), whereas some of the exercises were too complex for the ages and developmental gaps of those patients (inconsistency), and the overall planning of the therapy sessions was not balanced, lacking attention to important traits (incompleteness). The analysis of the cases revealed that the training corpus was too sparse to address their specifics according to the outcomes of the evaluation of the 102 SL parameters. In the absence of very specific training, for example, SPELTA produced largely similar subplans for the new cases 8 and 10, reusing activities and exercises from previous cases that were found to be similar. However, even though both subjects were affected by athetoid cerebral palsy, they differed in that subject 8 would not understand some orders and exercises, whereas subject 10 would not be able to perform some of the selected exercises due to uncontrolled movements of limbs and trunk.</p>
        </sec>
        <sec>
          <title>Receptive Language</title>
          <p>In this area, the system could not generate a correct subplan only for the cases 3 and 7. The subplan generated for case 7 (a patient affected by cerebral palsy) would have been better suited to someone with greater developmental age, whereas the one generated for case 3 (a patient with spastic cerebral palsy and dysphasia) failed to pay proper attention to the large developmental gap.</p>
          <p>The average values of accuracy, consistency, and completeness attained in the five SL areas and globally are shown in <xref ref-type="table" rid="table2">Table 2</xref>. The validity of the subplans generated automatically and of the therapy plans as a whole (discarding any plan that contained an invalid subplan) is given in <xref ref-type="table" rid="table3">Table 3</xref>.</p>
          <p>Finally, <xref ref-type="table" rid="table4">Table 4</xref> summarizes the replies to the question of whether the subplans provided by SPELTA were “better than” or “as good as” the plans that the SLPs would have created manually.</p>
          <table-wrap position="float" id="table2">
            <label>Table 2</label>
            <caption>
              <p>Average values of accuracy, consistency, and completeness.</p>
            </caption>
            <table width="665" cellpadding="7" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="84"/>
              <col width="46"/>
              <col width="92"/>
              <col width="91"/>
              <col width="126"/>
              <col width="84"/>
              <col width="42"/>
              <thead>
                <tr valign="top">
                  <td><break/></td>
                  <td>Hearing</td>
                  <td>Oral structure &#38; function</td>
                  <td>Linguistic formulation</td>
                  <td>Expressive language &#38; articulation</td>
                  <td>Receptive language</td>
                  <td>Overall</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Accuracy</td>
                  <td>4.92</td>
                  <td>4.92</td>
                  <td>4.92</td>
                  <td>3.77</td>
                  <td>4.69</td>
                  <td>4.65</td>
                </tr>
                <tr valign="top">
                  <td>Consistency</td>
                  <td>4.85</td>
                  <td>4.92</td>
                  <td>5</td>
                  <td>3.77</td>
                  <td>4.46</td>
                  <td>4.60</td>
                </tr>
                <tr valign="top">
                  <td>Completeness</td>
                  <td>4.92</td>
                  <td>4.85</td>
                  <td>4.92</td>
                  <td>3.77</td>
                  <td>4.54</td>
                  <td>4.60</td>
                </tr>
              </tbody>
            </table>
          </table-wrap>
          <table-wrap position="float" id="table3">
            <label>Table 3</label>
            <caption>
              <p>Validity of the subplans generated for each area, and the plans as a whole.</p>
            </caption>
            <table width="665" cellpadding="7" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="443"/>
              <col width="192"/>
              <thead>
                <tr valign="top">
                  <td>Subplans</td>
                  <td>%</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Hearing</td>
                  <td>100</td>
                </tr>
                <tr valign="top">
                  <td>Oral structure &#38; function</td>
                  <td>100</td>
                </tr>
                <tr valign="top">
                  <td>Linguistic formulation</td>
                  <td>100</td>
                </tr>
                <tr valign="top">
                  <td>Expressive language &#38; articulation</td>
                  <td>69</td>
                </tr>
                <tr valign="top">
                  <td>Receptive language</td>
                  <td>85</td>
                </tr>
                <tr valign="top">
                  <td>Overall plans for the five areas</td>
                  <td>54</td>
                </tr>
              </tbody>
            </table>
          </table-wrap>
          <table-wrap position="float" id="table4">
            <label>Table 4</label>
            <caption>
              <p>Percentage of positive replies to whether the expert system provided an output comparable to that of a human SLP.</p>
            </caption>
            <table width="665" cellpadding="7" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="443"/>
              <col width="192"/>
              <thead>
                <tr valign="top">
                  <td>Subplans</td>
                  <td>%</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Hearing</td>
                  <td>100</td>
                </tr>
                <tr valign="top">
                  <td>Oral structure &#38; function</td>
                  <td>85</td>
                </tr>
                <tr valign="top">
                  <td>Linguistic formulation</td>
                  <td>92</td>
                </tr>
                <tr valign="top">
                  <td>Expressive language &#38; articulation</td>
                  <td>62</td>
                </tr>
                <tr valign="top">
                  <td>Receptive language</td>
                  <td>85</td>
                </tr>
                <tr valign="top">
                  <td>Overall plans for the five areas</td>
                  <td>92</td>
                </tr>
              </tbody>
            </table>
          </table-wrap>
        </sec>
      </sec>
      <sec>
        <title>Cross-Validation on a Partition of the Corpus</title>
        <p><xref ref-type="table" rid="table5">Tables 5</xref>, <xref ref-type="table" rid="table6">6</xref>, and <xref ref-type="table" rid="table7">7</xref> show the average values obtained on the Likert scale in the four rounds of cross-validation with a partition of the original corpus of 117 cases. In turn, <xref ref-type="table" rid="table8">Tables 8</xref> and <xref ref-type="table" rid="table9">9</xref> contain data about the validity of the therapy plans and subplans provided by the system, and the replies to the question of whether the subplans were “better than” or “as good as” the plans that the SLPs would have created manually.</p>
        <table-wrap position="float" id="table5">
          <label>Table 5</label>
          <caption>
            <p>Average values of accuracy, consistency, and completeness for the areas of hearing and of oral structure and function in the rounds of cross-validation.</p>
          </caption>
          <table width="660" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="59"/>
            <col width="68"/>
            <col width="90"/>
            <col width="106"/>
            <col width="68"/>
            <col width="90"/>
            <col width="106"/>
            <thead>
              <tr valign="top">
                <td rowspan="2">K</td>
                <td colspan="3">Hearing</td>
                <td colspan="3">Oral structure &#38; function</td>
              </tr>
              <tr valign="top">
                <td>Accuracy</td>
                <td>Consistency</td>
                <td>Completeness</td>
                <td>Accuracy</td>
                <td>Consistency</td>
                <td>Completeness</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>1</td>
                <td>4.8</td>
                <td>4.74</td>
                <td>4.91</td>
                <td>4.94</td>
                <td>4.97</td>
                <td>4.75</td>
              </tr>
              <tr valign="top">
                <td>2</td>
                <td>4.93</td>
                <td>4.87</td>
                <td>4.9</td>
                <td>4.95</td>
                <td>4.85</td>
                <td>4.87</td>
              </tr>
              <tr valign="top">
                <td>3</td>
                <td>4.84</td>
                <td>4.8</td>
                <td>4.83</td>
                <td>4.82</td>
                <td>4.91</td>
                <td>4.83</td>
              </tr>
              <tr valign="top">
                <td>4</td>
                <td>4.9</td>
                <td>4.72</td>
                <td>4.85</td>
                <td>4.92</td>
                <td>4.83</td>
                <td>4.77</td>
              </tr>
              <tr valign="top">
                <td>Average</td>
                <td>4.87</td>
                <td>4.78</td>
                <td>4.87</td>
                <td>4.91</td>
                <td>4.89</td>
                <td>4.81</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <table-wrap position="float" id="table6">
          <label>Table 6</label>
          <caption>
            <p>Average values of accuracy, consistency, and completeness for the areas of linguistic formulation, and of expressive language and articulation in the rounds of cross-validation.</p>
          </caption>
          <table width="660" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="59"/>
            <col width="68"/>
            <col width="90"/>
            <col width="106"/>
            <col width="68"/>
            <col width="90"/>
            <col width="106"/>
            <thead>
              <tr valign="top">
                <td rowspan="2">K</td>
                <td colspan="3">Linguistic formulation</td>
                <td colspan="3">Expressive language &#38; articulation</td>
              </tr>
              <tr valign="top">
                <td>Accuracy</td>
                <td>Consistency</td>
                <td>Completeness</td>
                <td>Accuracy</td>
                <td>Consistency</td>
                <td>Completeness</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>1</td>
                <td>4.84</td>
                <td>4.94</td>
                <td>4.72</td>
                <td>2.93</td>
                <td>2.93</td>
                <td>2.68</td>
              </tr>
              <tr valign="top">
                <td>2</td>
                <td>4.89</td>
                <td>4.98</td>
                <td>4.85</td>
                <td>2.78</td>
                <td>2.91</td>
                <td>2.97</td>
              </tr>
              <tr valign="top">
                <td>3</td>
                <td>4.8</td>
                <td>4.89</td>
                <td>4.91</td>
                <td>3.57</td>
                <td>3.02</td>
                <td>3.31</td>
              </tr>
              <tr valign="top">
                <td>4</td>
                <td>4.9</td>
                <td>4.82</td>
                <td>4.9</td>
                <td>3.14</td>
                <td>2.98</td>
                <td>3.16</td>
              </tr>
              <tr valign="top">
                <td>Average</td>
                <td>4.86</td>
                <td>4.91</td>
                <td>4.85</td>
                <td>3.11</td>
                <td>2.96</td>
                <td>3.03</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <table-wrap position="float" id="table7">
          <label>Table 7</label>
          <caption>
            <p>Average values of accuracy, consistency, and completeness for receptive language and overall scores in the rounds of cross-validation.</p>
          </caption>
          <table width="660" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="59"/>
            <col width="68"/>
            <col width="90"/>
            <col width="106"/>
            <col width="68"/>
            <col width="90"/>
            <col width="106"/>
            <thead>
              <tr valign="top">
                <td rowspan="2">K</td>
                <td colspan="3">Receptive language</td>
                <td colspan="3">Overall</td>
              </tr>
              <tr valign="top">
                <td>Accuracy</td>
                <td>Consistency</td>
                <td>Completeness</td>
                <td>Accuracy</td>
                <td>Consistency</td>
                <td>Completeness</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>1</td>
                <td>4.57</td>
                <td>4.01</td>
                <td>4.28</td>
                <td>4.42</td>
                <td>4.32</td>
                <td>4.27</td>
              </tr>
              <tr valign="top">
                <td>2</td>
                <td>4.67</td>
                <td>4.41</td>
                <td>4.51</td>
                <td>4.44</td>
                <td>4.40</td>
                <td>4.42</td>
              </tr>
              <tr valign="top">
                <td>3</td>
                <td>4.66</td>
                <td>4.41</td>
                <td>4.55</td>
                <td>4.54</td>
                <td>4.41</td>
                <td>4.49</td>
              </tr>
              <tr valign="top">
                <td>4</td>
                <td>4.34</td>
                <td>4.65</td>
                <td>4.28</td>
                <td>4.44</td>
                <td>4.40</td>
                <td>4.39</td>
              </tr>
              <tr valign="top">
                <td>Average</td>
                <td>4.56</td>
                <td>4.37</td>
                <td>4.41</td>
                <td>4.46</td>
                <td>4.38</td>
                <td>4.39</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <table-wrap position="float" id="table8">
          <label>Table 8</label>
          <caption>
            <p>Validity of the subplans generated for each area, and the plans as a whole in the rounds of cross-validation.</p>
          </caption>
          <table width="658" cellpadding="7" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="327"/>
            <col width="49"/>
            <col width="49"/>
            <col width="49"/>
            <col width="49"/>
            <col width="48"/>
            <thead>
              <tr valign="top">
                <td>Subplans</td>
                <td>1</td>
                <td>2</td>
                <td>3</td>
                <td>4</td>
                <td>Average</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Hearing</td>
                <td>97%</td>
                <td>93%</td>
                <td>97%</td>
                <td>100%</td>
                <td>97%</td>
              </tr>
              <tr valign="top">
                <td>Oral structure &#38; function</td>
                <td>93%</td>
                <td>93%</td>
                <td>100%</td>
                <td>100%</td>
                <td>97%</td>
              </tr>
              <tr valign="top">
                <td>Linguistic formulation</td>
                <td>97%</td>
                <td>100%</td>
                <td>93%</td>
                <td>93%</td>
                <td>96%</td>
              </tr>
              <tr valign="top">
                <td>Expressive language &#38; articulation</td>
                <td>79%</td>
                <td>72%</td>
                <td>72%</td>
                <td>77%</td>
                <td>75%</td>
              </tr>
              <tr valign="top">
                <td>Receptive language</td>
                <td>76%</td>
                <td>76%</td>
                <td>79%</td>
                <td>80%</td>
                <td>78%</td>
              </tr>
              <tr valign="top">
                <td>Overall plans for the five areas</td>
                <td>48%</td>
                <td>45%</td>
                <td>52%</td>
                <td>50%</td>
                <td>49%</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <table-wrap position="float" id="table9">
          <label>Table 9</label>
          <caption>
            <p>Percentage of positive replies to whether the expert system provided an output comparable to that of a human SLP in the rounds of cross-validation.</p>
          </caption>
          <table width="658" cellpadding="7" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="327"/>
            <col width="49"/>
            <col width="49"/>
            <col width="49"/>
            <col width="49"/>
            <col width="48"/>
            <thead>
              <tr valign="top">
                <td>Subplans</td>
                <td>1</td>
                <td>2</td>
                <td>3</td>
                <td>4</td>
                <td>Average</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Hearing</td>
                <td>97%</td>
                <td>90%</td>
                <td>93%</td>
                <td>97%</td>
                <td>94%</td>
              </tr>
              <tr valign="top">
                <td>Oral structure &#38; function</td>
                <td>83%</td>
                <td>79%</td>
                <td>90%</td>
                <td>83%</td>
                <td>84%</td>
              </tr>
              <tr valign="top">
                <td>Linguistic formulation</td>
                <td>93%</td>
                <td>93%</td>
                <td>90%</td>
                <td>93%</td>
                <td>92%</td>
              </tr>
              <tr valign="top">
                <td>Expressive language &#38; articulation</td>
                <td>55%</td>
                <td>55%</td>
                <td>59%</td>
                <td>57%</td>
                <td>57%</td>
              </tr>
              <tr valign="top">
                <td>Receptive language</td>
                <td>83%</td>
                <td>79%</td>
                <td>79%</td>
                <td>87%</td>
                <td>82%</td>
              </tr>
              <tr valign="top">
                <td>Overall plans for the five areas</td>
                <td>90%</td>
                <td>93%</td>
                <td>90%</td>
                <td>90%</td>
                <td>91%</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>The results obtained in this experiment of generating therapy plans for new subject cases are encouraging about the potential use of the SPELTA expert system in SLT practice. The ratings achieved in terms of accuracy, consistency, and completeness show that the system succeeds in the task of automatically creating new therapy plans out of the knowledge contained in its corpus and in the catalogues of activities and exercises. The subplans generated for the different SL areas were most often considered valid and directly usable, whereas the evaluation of the overall plans was hindered only by the relatively poor performance in the area of expressive language and articulation. Careful analysis of the results in that area suggests that it is necessary to refine some aspects of the reasoning mechanisms of the expert system, even though a more extensive corpus of cases would have also helped to achieve better ratings.</p>
        <p>Overall, the SLPs found that the plans provided by SPELTA are, most often, as good as the ones they would have created themselves in their normal work routines (not given sufficient time to work optimally). Thus, the system is a useful tool that can achieve significant savings of valuable and scarce human resources. In order to substantiate the time savings, the SLPs informally measured that the identification and supervision of semi-annual activities to put in a new therapy plan went from an average of 30 minutes down to 5 minutes; the selection of multimedia resources for specific exercises and sessions went from 40 to 6 minutes; and the generation of reports was automated to the point of reducing 24 minutes to 3.</p>
        <p>The percentage of positive judgments (92%; <xref ref-type="table" rid="table4">Table 4</xref>) is much higher than the percentage of plans that contained valid subplans for all five SL areas (54%; <xref ref-type="table" rid="table3">Table 3</xref>), showing that the SLPs still considered most of the subplans useful and valuable. Accordingly, the SLPs always took the output of SPELTA as a starting point to produce the final therapy plans to use with new patients. Furthermore, they praised the fact that the expert system helped them consider a larger set of activities and exercises than if they had proceeded manually.</p>
        <p>The four rounds of the cross-validation experiment yielded similar results, but the fact that the training sets were smaller (87, 88, 88, and 88 cases against 117) had an impact on the quality of the therapy plans, going down from 4.65 accuracy to 4.46, from 4.60 consistency to 4.38, and from 4.60 completeness to 4.39. Still, 49% of the plans were valid straightaway, and 91% were received positively by the SLPs. The greatest impact of working with a more reduced knowledge base was seen in the area of expressive language and articulation, which is in line with the previous observation that a larger corpus will be beneficial.</p>
      </sec>
      <sec>
        <title>Comparison With Prior Work</title>
        <p>Decision Support Systems (DSS) are becoming increasingly used in the realm of speech and language therapy, with plenty of technical solutions in place to address the specific challenges of the many different disorders. Some DSS depend entirely on input provided by humans, while others rely on signal processing techniques to achieve a level of automation. Thus, on the one hand, Martín Ruiz et al [<xref ref-type="bibr" rid="ref22">22</xref>] evaluated a Web-based DSS to monitor children’s neurodevelopment via the early detection of language delays at a nursery school, relying on input provided by the educators and on a set of over 100 rules to generate alerts in case of deviations from the expected developmental milestones. On the other hand, Schipor et al [<xref ref-type="bibr" rid="ref12">12</xref>] presented a model for automatic assessment of pronunciation quality for children, using Hidden Markov Models (HMM) and implementing a correlation measure to assess the level of intelligibility of utterances. Similarly, Saz et al [<xref ref-type="bibr" rid="ref13">13</xref>] had used HMM in combination with a subword-based pronunciation verification method. Utianski et al [<xref ref-type="bibr" rid="ref14">14</xref>] developed an application able to record speech samples and make calculations to assess the integrity of speech production (vowel space area, assessment of an individual’s pathology fingerprint, and identification of parameters of the intelligibility disorder). As a final example, Caballero-Morales and Trujillo-Romero [<xref ref-type="bibr" rid="ref15">15</xref>] improved the recognition rates for dysarthric patients by integrating multiple pronunciation patterns using genetic algorithms.</p>
        <p>All of the aforementioned works focused on providing aids for SLT diagnosis tasks. The idea of aiding in the design of speech and language therapy plans—as we aim to do with the SPELTA system—has fewer precedents in the literature. The closest reference can be found in the work of Schipor et al [<xref ref-type="bibr" rid="ref16">16</xref>], who developed a system based on fuzzy logic to plan sessions for the treatment of dyslalia, taking input from social, cognitive, and affective parameters, and providing output about types of exercises, frequency, and duration. Later, Yeh et al [<xref ref-type="bibr" rid="ref17">17</xref>] presented an approach based on neural networks to classify a wide range of SLT problems in order to help design occupational therapy plans, which may include some help to improve communication skills.</p>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>We believe our study has two main limitations. First, while the results do not show much variability (ratings of 5 were most numerous by far), the SPELTA system needs to be evaluated on a larger set of subject cases. Presumably, the system algorithms will behave more reliably in the presence of a larger corpus, since the sparsity of the corpus we used in our study was one of the reasons for the poor performance in the area of expressive language and articulation.</p>
        <p>Second, and probably more important, it would be interesting to experiment with more SLPs from more institutions and other situations than in Ecuador. The 4 SLPs participating in our study had been trained by the same books in the same school, which raises the possibility that there might be some bias in the judgment of the therapy plans presented to them. In the quest for greater evidence, we are actively seeking agreements to test our tools with universities, foundations, and professional associations from other Spanish-speaking countries.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>Our study shows that the SPELTA expert system provides valuable input for SLPs to design proper therapy plans for their patients, in a shorter time and considering a larger set of activities than proceeding manually. The system achieves nearly perfect performance in the areas of hearing, oral structure and function, and linguistic formulation, and also decent performance in receptive language. The poorer results in the area of expressive language and articulation have served to identify opportunities for technical improvements, in order to deal properly with new combinations of medical conditions and SL disorders, not properly captured in the corpus. Having a more extensive corpus would obviously help, but in the meantime before a database with thousands of cases becomes available, we are doing research on whether it would be good to adjust internal parameters of the current reasoning system of SPELTA, to define new metrics to compare cases and profiles, and to supplement the internal logic with radically different machine learning artifacts such as the cortical learning algorithm [<xref ref-type="bibr" rid="ref23">23</xref>].</p>
        <p>For future work, we propose a study of two new artificial intelligence techniques supporting the generation of therapy plans. First, we want to use template-based generation methods with weak supervisions [<xref ref-type="bibr" rid="ref24">24</xref>], defining a structure based on different levels of granularity in which it will be possible to incorporate common strategies, activities, and resources according to some specific traits and needs derived from the patient’s profile. Second, we are interested in deep belief networks and recurrent neural networks [<xref ref-type="bibr" rid="ref25">25</xref>], which may be able to extract the subtlest patterns from the complex data and interrelations of the SLT area.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <app id="app1">
        <title>Multimedia Appendix 1</title>
        <p>Summary of patient profiles.</p>
        <media xlink:href="medinform_v4i3e23_app1.pdf" xlink:title="PDF File (Adobe PDF File), 67KB"/>
      </app>
      <app id="app2">
        <title>Multimedia Appendix 2</title>
        <p>Summary of profiles for patients randomly selected for the evaluation of the expert system.</p>
        <media xlink:href="medinform_v4i3e23_app2.pdf" xlink:title="PDF File (Adobe PDF File), 27KB"/>
      </app>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">DSS</term>
          <def>
            <p>Decision Support Systems</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">HMM</term>
          <def>
            <p>Hidden Markov Models</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">ICT</term>
          <def>
            <p>information and communication technology</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">SL</term>
          <def>
            <p>Speech and Language</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">SLP</term>
          <def>
            <p>Speech and Language Pathologist</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">SLT</term>
          <def>
            <p>Speech and Language Therapy</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">SPELTA</term>
          <def>
            <p>SPEech and Language Therapy Assistant</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>The authors from Universidad Politécnica Salesiana have been supported by the “Sistemas Inteligentes de Soporte a la Educación” research project (CIDII-010213). We would like to thank Zaituna Bykbaeva, Gladys Ochoa, and all the collaborating people from Instituto de Parálisis Cerebral del Azuay, Fundación “General Dávalos,” and CEDEI School.</p>
      <p>The authors from the University of Vigo were supported by the European Regional Development Fund (ERDF) and the Galician Regional Government under agreement for funding the Atlantic Research Center for Information and Communication Technologies (AtlantTIC), as well as by the Ministerio de Educación y Ciencia (Gobierno de España) research project TIN2013-42774-R (partly financed with FEDER [European Regional Development Fund] funds).</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="book">
        <person-group person-group-type="author">
          <collab>World Health Organization</collab>
          <collab>The World Bank</collab>
        </person-group>
        <article-title>World report on disability</article-title>
        <source>WHO Library Cataloguing-in-Publication Data</source>  
        <year>2011</year>  
        <publisher-loc>Malta</publisher-loc>
        <publisher-name>WHO Press</publisher-name></nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Ruben</surname>
            <given-names>RJ</given-names>
          </name>
        </person-group>
        <article-title>Redefining the survival of the fittest: communication disorders in the 21st century</article-title>
        <source>Laryngoscope</source>  
        <year>2000</year>  
        <month>02</month>  
        <volume>110</volume>  
        <issue>2 Pt 1</issue>  
        <fpage>241</fpage>  
        <lpage>245</lpage>  
        <pub-id pub-id-type="doi">10.1097/00005537-200002010-00010</pub-id>
        <pub-id pub-id-type="medline">10680923</pub-id></nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Lauritsen</surname>
            <given-names>MB</given-names>
          </name>
          <name name-style="western">
            <surname>Pedersen</surname>
            <given-names>CB</given-names>
          </name>
          <name name-style="western">
            <surname>Mortensen</surname>
            <given-names>PB</given-names>
          </name>
        </person-group>
        <article-title>The incidence and prevalence of pervasive developmental disorders: a Danish population-based study</article-title>
        <source>Psychol Med</source>  
        <year>2004</year>  
        <month>10</month>  
        <volume>34</volume>  
        <issue>7</issue>  
        <fpage>1339</fpage>  
        <lpage>1346</lpage>  
        <pub-id pub-id-type="medline">15697060</pub-id></nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Roy</surname>
            <given-names>N</given-names>
          </name>
          <name name-style="western">
            <surname>Merrill</surname>
            <given-names>RM</given-names>
          </name>
          <name name-style="western">
            <surname>Gray</surname>
            <given-names>SD</given-names>
          </name>
          <name name-style="western">
            <surname>Smith</surname>
            <given-names>EM</given-names>
          </name>
        </person-group>
        <article-title>Voice disorders in the general population: prevalence, risk factors, and occupational impact</article-title>
        <source>Laryngoscope</source>  
        <year>2005</year>  
        <month>11</month>  
        <volume>115</volume>  
        <issue>11</issue>  
        <fpage>1988</fpage>  
        <lpage>1995</lpage>  
        <pub-id pub-id-type="doi">10.1097/01.mlg.0000179174.32345.41</pub-id>
        <pub-id pub-id-type="medline">16319611</pub-id>
        <pub-id pub-id-type="pii">00005537-200511000-00016</pub-id></nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Roy</surname>
            <given-names>N</given-names>
          </name>
          <name name-style="western">
            <surname>Stemple</surname>
            <given-names>J</given-names>
          </name>
          <name name-style="western">
            <surname>Merrill</surname>
            <given-names>RM</given-names>
          </name>
          <name name-style="western">
            <surname>Thomas</surname>
            <given-names>L</given-names>
          </name>
        </person-group>
        <article-title>Dysphagia in the elderly: preliminary evidence of prevalence, risk factors, and socioemotional effects</article-title>
        <source>Ann Otol Rhinol Laryngol</source>  
        <year>2007</year>  
        <month>11</month>  
        <volume>116</volume>  
        <issue>11</issue>  
        <fpage>858</fpage>  
        <lpage>865</lpage>  
        <pub-id pub-id-type="medline">18074673</pub-id></nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="web">
        <person-group person-group-type="author">
          <collab>American Speech-Language-Hearing Association (ASHA)</collab>
        </person-group>
        <source>Speech &#38; Language Disorders</source>  
        <access-date>2016-02-09</access-date>
        <comment>Quick facts 
        <ext-link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:type="simple" xlink:href="http://www.asha.org/About/news/Quick-Facts/">http://www.asha.org/About/news/Quick-Facts/</ext-link>
        <ext-link ext-link-type="webcite" xlink:href="6fAxGGqyt"/></comment> </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Code</surname>
            <given-names>C</given-names>
          </name>
          <name name-style="western">
            <surname>Petheram</surname>
            <given-names>B</given-names>
          </name>
        </person-group>
        <article-title>Delivering for aphasia</article-title>
        <source>Int J Speech Lang Pathol</source>  
        <year>2011</year>  
        <month>02</month>  
        <volume>13</volume>  
        <issue>1</issue>  
        <fpage>3</fpage>  
        <lpage>10</lpage>  
        <pub-id pub-id-type="doi">10.3109/17549507.2010.520090</pub-id>
        <pub-id pub-id-type="medline">21329405</pub-id></nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>McAllister</surname>
            <given-names>L</given-names>
          </name>
          <name name-style="western">
            <surname>Wylie</surname>
            <given-names>K</given-names>
          </name>
          <name name-style="western">
            <surname>Davidson</surname>
            <given-names>B</given-names>
          </name>
          <name name-style="western">
            <surname>Marshall</surname>
            <given-names>J</given-names>
          </name>
        </person-group>
        <article-title>The World Report on Disability: an impetus to reconceptualize services for people with communication disability</article-title>
        <source>Int J Speech Lang Pathol</source>  
        <year>2013</year>  
        <month>02</month>  
        <volume>15</volume>  
        <issue>1</issue>  
        <fpage>118</fpage>  
        <lpage>126</lpage>  
        <pub-id pub-id-type="doi">10.3109/17549507.2012.757804</pub-id>
        <pub-id pub-id-type="medline">23323824</pub-id></nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="book">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Turnbull</surname>
            <given-names>R</given-names>
          </name>
          <name name-style="western">
            <surname>Wehmeyer</surname>
            <given-names>M</given-names>
          </name>
          <name name-style="western">
            <surname>Shogren</surname>
            <given-names>K</given-names>
          </name>
        </person-group>
        <source>Exceptional Lives: Special Education in Today's Schools (7th Edition)</source>  
        <year>2013</year>  
        <publisher-loc>Kansas</publisher-loc>
        <publisher-name>Prentice Hall</publisher-name></nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Mackenzie</surname>
            <given-names>C</given-names>
          </name>
          <name name-style="western">
            <surname>Le</surname>
            <given-names>MM</given-names>
          </name>
          <name name-style="western">
            <surname>Lendrem</surname>
            <given-names>W</given-names>
          </name>
          <name name-style="western">
            <surname>McGuirk</surname>
            <given-names>E</given-names>
          </name>
          <name name-style="western">
            <surname>Marshall</surname>
            <given-names>J</given-names>
          </name>
          <name name-style="western">
            <surname>Rossiter</surname>
            <given-names>D</given-names>
          </name>
        </person-group>
        <article-title>A survey of aphasia services in the United Kingdom</article-title>
        <source>Eur J Disord Commun</source>  
        <year>1993</year>  
        <volume>28</volume>  
        <issue>1</issue>  
        <fpage>43</fpage>  
        <lpage>61</lpage>  
        <pub-id pub-id-type="medline">8400482</pub-id></nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Code</surname>
            <given-names>C</given-names>
          </name>
          <name name-style="western">
            <surname>Heron</surname>
            <given-names>C</given-names>
          </name>
        </person-group>
        <article-title>Services for aphasia, other acquired adult neurogenic communication and swallowing disorders in the United Kingdom, 2000</article-title>
        <source>Disabil Rehabil</source>  
        <year>2003</year>  
        <month>11</month>  
        <day>4</day>  
        <volume>25</volume>  
        <issue>21</issue>  
        <fpage>1231</fpage>  
        <lpage>1237</lpage>  
        <pub-id pub-id-type="doi">10.1080/09638280310001599961</pub-id>
        <pub-id pub-id-type="medline">14578063</pub-id>
        <pub-id pub-id-type="pii">B3AL8684R98LCRQH</pub-id></nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Schipor</surname>
            <given-names>OA</given-names>
          </name>
          <name name-style="western">
            <surname>Pentiuc</surname>
            <given-names>SG</given-names>
          </name>
          <name name-style="western">
            <surname>Schipor</surname>
            <given-names>MD</given-names>
          </name>
        </person-group>
        <article-title>Automatic Assessment of Pronunciation Quality of Children within Assisted Speech Therapy</article-title>
        <source>Electronics &#38; Electrical Engineering</source>  
        <year>2012</year>  
        <month>06</month>  
        <day>11</day>  
        <volume>122</volume>  
        <issue>6</issue>  
        <fpage>15</fpage>  
        <lpage>18</lpage>  
        <pub-id pub-id-type="doi">10.5755/j01.eee.122.6.1813</pub-id></nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Saz</surname>
            <given-names>O</given-names>
          </name>
          <name name-style="western">
            <surname>Yin</surname>
            <given-names>S</given-names>
          </name>
          <name name-style="western">
            <surname>Lleida</surname>
            <given-names>E</given-names>
          </name>
          <name name-style="western">
            <surname>Rose</surname>
            <given-names>R</given-names>
          </name>
          <name name-style="western">
            <surname>Vaquero</surname>
            <given-names>C</given-names>
          </name>
          <name name-style="western">
            <surname>Rodríguez</surname>
            <given-names>WR</given-names>
          </name>
        </person-group>
        <article-title>Tools and Technologies for Computer-Aided Speech and Language Therapy</article-title>
        <source>Speech Communication</source>  
        <year>2009</year>  
        <month>10</month>  
        <volume>51</volume>  
        <issue>10</issue>  
        <fpage>948</fpage>  
        <lpage>967</lpage>  
        <pub-id pub-id-type="doi">10.1016/j.specom.2009.04.006</pub-id></nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Utianski</surname>
            <given-names>R</given-names>
          </name>
          <name name-style="western">
            <surname>Sandoval</surname>
            <given-names>S</given-names>
          </name>
          <name name-style="western">
            <surname>Lehrer</surname>
            <given-names>N</given-names>
          </name>
          <name name-style="western">
            <surname>Berisha</surname>
            <given-names>V</given-names>
          </name>
          <name name-style="western">
            <surname>Liss</surname>
            <given-names>J</given-names>
          </name>
        </person-group>
        <article-title>Speech assist: An augmentative tool for practice in speech-language pathology</article-title>
        <source>Journal of the Acoustical Society of America</source>  
        <year>2013</year>  
        <volume>134</volume>  
        <issue>5</issue>  
        <pub-id pub-id-type="doi">10.1121/1.4831186</pub-id></nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Caballero-Morales</surname>
            <given-names>S</given-names>
          </name>
          <name name-style="western">
            <surname>Trujillo-Romero</surname>
            <given-names>F</given-names>
          </name>
        </person-group>
        <article-title>Evolutionary approach for integration of multiple pronunciation patterns for enhancement of dysarthric speech recognition</article-title>
        <source>Expert Systems with Applications</source>  
        <year>2014</year>  
        <month>2</month>  
        <volume>41</volume>  
        <issue>3</issue>  
        <fpage>841</fpage>  
        <lpage>852</lpage>  
        <pub-id pub-id-type="doi">10.1016/j.eswa.2013.08.014</pub-id></nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Schipor</surname>
            <given-names>O</given-names>
          </name>
          <name name-style="western">
            <surname>Pentiuc</surname>
            <given-names>S</given-names>
          </name>
          <name name-style="western">
            <surname>Schipor</surname>
            <given-names>M</given-names>
          </name>
        </person-group>
        <article-title>Improving computer-based speech therapy using a fuzzy expert system</article-title>
        <source>Computing &#38; Informatics</source>  
        <year>2010</year>  
        <volume>29</volume>  
        <issue>2</issue>  
        <fpage>303</fpage>  
        <lpage>318</lpage> </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Yeh</surname>
            <given-names>Y</given-names>
          </name>
          <name name-style="western">
            <surname>Hou</surname>
            <given-names>T</given-names>
          </name>
          <name name-style="western">
            <surname>Chang</surname>
            <given-names>W</given-names>
          </name>
        </person-group>
        <article-title>An intelligent model for the classification of children’s occupational therapy problems</article-title>
        <source>Expert Systems with Applications</source>  
        <year>2012</year>  
        <month>4</month>  
        <volume>39</volume>  
        <issue>5</issue>  
        <fpage>5233</fpage>  
        <lpage>5242</lpage>  
        <pub-id pub-id-type="doi">10.1016/j.eswa.2011.11.016</pub-id></nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Robles-Bykbaev</surname>
            <given-names>VE</given-names>
          </name>
          <name name-style="western">
            <surname>López-Nores</surname>
            <given-names>M</given-names>
          </name>
          <name name-style="western">
            <surname>Pazos-Arias</surname>
            <given-names>JJ</given-names>
          </name>
          <name name-style="western">
            <surname>Arévalo-Lucero</surname>
            <given-names>D</given-names>
          </name>
        </person-group>
        <article-title>SPELTA: An expert system to generate therapy plans for speech and language disorders</article-title>
        <source>Expert Systems with Applications</source>  
        <year>2015</year>  
        <month>11</month>  
        <volume>42</volume>  
        <issue>21</issue>  
        <fpage>7641</fpage>  
        <lpage>7651</lpage>  
        <pub-id pub-id-type="doi">10.1016/j.eswa.2015.06.011</pub-id></nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Robles-Bykbaev</surname>
            <given-names>V</given-names>
          </name>
          <name name-style="western">
            <surname>López-Nores</surname>
            <given-names>M</given-names>
          </name>
          <name name-style="western">
            <surname>Pazos-Arias</surname>
            <given-names>J</given-names>
          </name>
          <name name-style="western">
            <surname>Quisi-Peralta</surname>
            <given-names>D</given-names>
          </name>
          <name name-style="western">
            <surname>García-Duque</surname>
            <given-names>J</given-names>
          </name>
        </person-group>
        <article-title>An Ecosystem of Intelligent ICT Tools for Speech-Language Therapy Based on a Formal Knowledge Model</article-title>
        <source>Stud Health Technol Inform</source>  
        <year>2015</year>  
        <volume>216</volume>  
        <fpage>50</fpage>  
        <lpage>54</lpage>  
        <pub-id pub-id-type="medline">26262008</pub-id></nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Geetha</surname>
            <given-names>T</given-names>
          </name>
          <name name-style="western">
            <surname>Arock</surname>
            <given-names>M</given-names>
          </name>
        </person-group>
        <article-title>Data clustering using modified k-medoids algorithm</article-title>
        <source>IJMEI</source>  
        <year>2012</year>  
        <volume>4</volume>  
        <issue>2</issue>  
        <fpage>109</fpage>  
        <lpage>124</lpage>  
        <pub-id pub-id-type="doi">10.1504/IJMEI.2012.046988</pub-id></nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Hariharan</surname>
            <given-names>M</given-names>
          </name>
          <name name-style="western">
            <surname>Chee</surname>
            <given-names>LS</given-names>
          </name>
          <name name-style="western">
            <surname>Ai</surname>
            <given-names>OC</given-names>
          </name>
          <name name-style="western">
            <surname>Yaacob</surname>
            <given-names>S</given-names>
          </name>
        </person-group>
        <article-title>Classification of speech dysfluencies using LPC based parameterization techniques</article-title>
        <source>J Med Syst</source>  
        <year>2012</year>  
        <month>06</month>  
        <volume>36</volume>  
        <issue>3</issue>  
        <fpage>1821</fpage>  
        <lpage>1830</lpage>  
        <pub-id pub-id-type="doi">10.1007/s10916-010-9641-6</pub-id>
        <pub-id pub-id-type="medline">21249515</pub-id></nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Martín Ruiz</surname>
            <given-names>ML</given-names>
          </name>
          <name name-style="western">
            <surname>Valero Duboy</surname>
            <given-names>MA</given-names>
          </name>
          <name name-style="western">
            <surname>Torcal</surname>
            <given-names>LC</given-names>
          </name>
          <name name-style="western">
            <surname>Pau de la Cruz</surname>
            <given-names>I</given-names>
          </name>
        </person-group>
        <article-title>Evaluating a web-based clinical decision support system for language disorders screening in a nursery school</article-title>
        <source>J Med Internet Res</source>  
        <year>2014</year>  
        <volume>16</volume>  
        <issue>5</issue>  
        <fpage>e139</fpage>  
        <comment>
          <ext-link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:type="simple" xlink:href="http://www.jmir.org/2014/5/e139/"/>
        </comment>  
        <pub-id pub-id-type="doi">10.2196/jmir.3263</pub-id>
        <pub-id pub-id-type="medline">24870413</pub-id>
        <pub-id pub-id-type="pii">v16i5e139</pub-id>
        <pub-id pub-id-type="pmcid">PMC4060144</pub-id></nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="web">
        <person-group person-group-type="author">
          <collab>Numenta Inc</collab>
        </person-group>
        <source>Hierarchical Temporal Memory (HTM)</source>  
        <year>2015</year>  
        <access-date>2016-02-09</access-date>
        <comment>
          <ext-link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:type="simple" xlink:href="https://github.com/numenta/nupic/wiki/Hierarchical-Temporal-Memory-Theory">https://github.com/numenta/nupic/wiki/Hierarchical-Temporal-Memory-Theory</ext-link>
          <ext-link ext-link-type="webcite" xlink:href="http://www.webcitation.org/6fAybAP55"/>
        </comment> </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="confproc">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Unger</surname>
            <given-names>C</given-names>
          </name>
          <name name-style="western">
            <surname>Bühmann</surname>
            <given-names>L</given-names>
          </name>
          <name name-style="western">
            <surname>Lehmann</surname>
            <given-names>J</given-names>
          </name>
          <name name-style="western">
            <surname>Ngonga Ngomo</surname>
            <given-names>AC</given-names>
          </name>
          <name name-style="western">
            <surname>Gerber</surname>
            <given-names>D</given-names>
          </name>
          <name name-style="western">
            <surname>Cimiano</surname>
            <given-names>P</given-names>
          </name>
        </person-group>
        <article-title>Template-based question answering over RDF data</article-title>
        <year>2012</year>  
        <month>04</month>  
        <day>20</day>  
        <conf-name>21st International Conference on World Wide Web</conf-name>
        <conf-date>2012</conf-date>
        <conf-loc>Lyon</conf-loc>
        <fpage>639</fpage>  
        <lpage>648</lpage>  
        <pub-id pub-id-type="doi">10.1145/2187836.2187923</pub-id></nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
        <person-group person-group-type="author">
          <name name-style="western">
            <surname>Deng</surname>
            <given-names>L</given-names>
          </name>
        </person-group>
        <article-title>Deep Learning: Methods and Applications</article-title>
        <source>FNT in Signal Processing</source>  
        <year>2014</year>  
        <volume>7</volume>  
        <issue>3-4</issue>  
        <fpage>197</fpage>  
        <lpage>387</lpage>  
        <pub-id pub-id-type="doi">10.1561/2000000039</pub-id></nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
