<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.0" xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMI</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Med Inform</journal-id>
      <journal-title>JMIR Medical Informatics</journal-title>
      <issn pub-type="epub">2291-9694</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v9i7e28754</article-id>
      <article-id pub-id-type="pmid">34269683</article-id>
      <article-id pub-id-type="doi">10.2196/28754</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Depression Detection on Reddit With an Emotion-Based Attention Network: Algorithm Development and Validation</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Hao</surname>
            <given-names>Tianyong</given-names>
          </name>
        </contrib>
        <contrib contrib-type="editor">
          <name>
            <surname>Huang</surname>
            <given-names>Zhengxing</given-names>
          </name>
        </contrib>
        <contrib contrib-type="editor">
          <name>
            <surname>Tang</surname>
            <given-names>Buzhou</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Qian</surname>
            <given-names>Tieyun</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Han</surname>
            <given-names>Jin</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Ren</surname>
            <given-names>Lu</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-7200-9010</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Lin</surname>
            <given-names>Hongfei</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-0872-7688</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Xu</surname>
            <given-names>Bo</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-5453-978X</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Zhang</surname>
            <given-names>Shaowu</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0796-2750</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Yang</surname>
            <given-names>Liang</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Dalian University of Technology</institution>
            <addr-line>No. 2 Linggong Road</addr-line>
            <addr-line>Dalian</addr-line>
            <country>China</country>
            <phone>86 041184706009</phone>
            <email>liang@dlut.edu.cn</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-5557-7515</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Sun</surname>
            <given-names>Shichang</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0802-8708</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Dalian University of Technology</institution>
        <addr-line>Dalian</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>State Key Lab for Novel Software Technology</institution>
        <institution>Nanjing University</institution>
        <addr-line>Nanjing</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Dalian Minzu University</institution>
        <addr-line>Dalian</addr-line>
        <country>China</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Liang Yang <email>liang@dlut.edu.cn</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>7</month>
        <year>2021</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>16</day>
        <month>7</month>
        <year>2021</year>
      </pub-date>
      <volume>9</volume>
      <issue>7</issue>
      <elocation-id>e28754</elocation-id>
      <history>
        <date date-type="received">
          <day>13</day>
          <month>3</month>
          <year>2021</year>
        </date>
        <date date-type="rev-request">
          <day>5</day>
          <month>5</month>
          <year>2021</year>
        </date>
        <date date-type="rev-recd">
          <day>11</day>
          <month>5</month>
          <year>2021</year>
        </date>
        <date date-type="accepted">
          <day>19</day>
          <month>5</month>
          <year>2021</year>
        </date>
      </history>
      <copyright-statement>©Lu Ren, Hongfei Lin, Bo Xu, Shaowu Zhang, Liang Yang, Shichang Sun. Originally published in JMIR Medical Informatics (https://medinform.jmir.org), 16.07.2021.</copyright-statement>
      <copyright-year>2021</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Informatics, is properly cited. The complete bibliographic information, a link to the original publication on https://medinform.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://medinform.jmir.org/2021/7/e28754" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>As a common mental disease, depression seriously affects people’s physical and mental health. According to the statistics of the World Health Organization, depression is one of the main reasons for suicide and self-harm events in the world. Therefore, strengthening depression detection can effectively reduce the occurrence of suicide or self-harm events so as to save more people and families. With the development of computer technology, some researchers are trying to apply natural language processing techniques to detect people who are depressed automatically. Many existing feature engineering methods for depression detection are based on emotional characteristics, but these methods do not consider high-level emotional semantic information. The current deep learning methods for depression detection cannot accurately extract effective emotional semantic information.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>In this paper, we propose an emotion-based attention network, including a semantic understanding network and an emotion understanding network, which can capture the high-level emotional semantic information effectively to improve the depression detection task.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>The semantic understanding network module is used to capture the contextual semantic information. The emotion understanding network module is used to capture the emotional semantic information. There are two units in the emotion understanding network module, including a positive emotion understanding unit and a negative emotion understanding unit, which are used to capture the positive emotional information and the negative emotional information, respectively. We further proposed a dynamic fusion strategy in the emotion understanding network module to fuse the positive emotional information and the negative emotional information.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>We evaluated our method on the Reddit data set. The experimental results showed that the proposed emotion-based attention network model achieved an accuracy, precision, recall, and F-measure of 91.30%, 91.91%, 96.15%, and 93.98%, respectively, which are comparable results compared with state-of-the-art methods.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>The experimental results showed that our model is competitive with the state-of-the-art models. The semantic understanding network module, the emotion understanding network module, and the dynamic fusion strategy are effective modules for depression detection. In addition, the experimental results verified that the emotional semantic information was effective in depression detection.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>depression detection</kwd>
        <kwd>attention network</kwd>
        <kwd>emotional semantic information</kwd>
        <kwd>dynamic fusion strategy</kwd>
        <kwd>natural language processing</kwd>
        <kwd>social media</kwd>
        <kwd>emotion</kwd>
        <kwd>mental health</kwd>
        <kwd>algorithm</kwd>
        <kwd>deep learning</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>As defined in the free dictionary, depression refers to the act of depressing or state of being depressed. Depression is usually regarded as one type of mood disorder; the main clinical feature of depression is a significant and persistent low mood. The depressed patients’ emotion can range from gloomy to grief, low self-esteem, and even to pessimism, which may cause suicidal attempts or behaviors [<xref ref-type="bibr" rid="ref1">1</xref>]. The World Psychiatric Association set October 10 as the World Mental Health Day in 1992 to strengthen the awareness of the public on mental disorders. The latest report released by the World Health Organization (WHO) pointed out [<xref ref-type="bibr" rid="ref2">2</xref>] that there were approximately 322 million patients with depression in the world, and the prevalence rate was about 4.4%. The number of patients with depression is growing year by year. From 2005 to 2015, the number of patients with depression worldwide increased by 18.4%. According to the statistics of the WHO [<xref ref-type="bibr" rid="ref2">2</xref>], depression is one of the 20 main reasons that can cause suicide in the world, accounting for about 1.5% of suicides. It also accounts for the highest proportion of disability among the global diseases and is the main factor of global nonfatal health loss.</p>
        <p>With the development of the internet in people’s daily life, people began to share their feelings and problems on social media [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref4">4</xref>] such as Reddit and Twitter. The research of Park et al [<xref ref-type="bibr" rid="ref5">5</xref>] showed that people with depression tend to post information about depression and even treatment on social media. Thus, we can get a lot of valuable information from social media. If we can judge whether a person has depression based on the information from the internet, it can help the doctors intervene early and avoid the happening of self-injury or suicide. Many researchers, coming from different disciplines such as computer science and psychology, have paid much attention on this topic. In addition, some advanced methods are proposed for depression detection. However, the detection accuracy still needs to be improved.</p>
        <p>The goal of depression detection is to classify a person or a post as depressed or not. The performance of depression detection on social media can help with the clinical treatment of depression. This problem needs to be solved. The posts of patients with depression usually contain strong emotions. We give three examples of the textual posts left on Reddit, including two depression-indicative posts and one standard post as follows.</p>
        <list list-type="bullet">
          <list-item>
            <p>Example 1: “Today, I feel so horrible, it makes me want to die I made a fool of myself at work, felt so stupid after the meeting so I left work, told the boss I’m sick. Spent the remaining afternoon in bed.” Label: depression</p>
          </list-item>
          <list-item>
            <p>Example 2: “That feeling when you hate who you are as a person but can’t get yourself to change because you are so used to being like this for the past several years. I’ve become a shitty person. The thought of change seems impossible to me at this point.” Label: depression</p>
          </list-item>
          <list-item>
            <p>Example 3: “Looking for cool ways to tell parents my wife is pregnant.” Label: nondepression</p>
          </list-item>
        </list>
        <p>Examples 1 and 2 contain strong emotional information made by the patients with depression. From example 1, the words, including <italic>horrible</italic>, <italic>die</italic>, and <italic>stupid</italic>, express strong negative emotions of the author. The words <italic>hate</italic> and <italic>shitty</italic> in example 2 also express the author’s strong negative emotions. Example 3 shows the post of a regular user. It does not contain strong negative emotions. As previously mentioned, emotional semantic information usually provides us useful clues for depression detection.</p>
        <p>We also counted the proportion of the positive words and the negative words that appeared in the depression-indicative posts and the standard posts of the Reddit data set [<xref ref-type="bibr" rid="ref6">6</xref>], respectively. The statistical results are shown in <xref ref-type="table" rid="table1">Table 1</xref>. The percentage of positive emotion words in the table is calculated by <inline-graphic xlink:href="medinform_v9i7e28754_fig6.png" xlink:type="simple" mimetype="image"/>. The percentage of negative emotion words was calculated similarly. In addition, we calculated the percentages of emotion words in the depression-indicative posts and the standard posts. The depressed users used more negative words than the nondepressed users. At the same time, they used fewer positive words in their posts than the nondepressed users. It can be concluded from the statistical results that the emotional semantic information may play an effective role for the depression detection task.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Percentage of emotion words in posts.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="500"/>
            <col width="250"/>
            <col width="250"/>
            <thead>
              <tr valign="top">
                <td>Categories</td>
                <td>Depression-indicative posts (%)</td>
                <td>Standard posts (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Positive emotion words</td>
                <td>8.62</td>
                <td>9.41</td>
              </tr>
              <tr valign="top">
                <td>Negative emotion words</td>
                <td>6.70</td>
                <td>4.85</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <p>Detecting depression automatically has made some progress. Many existing models detect depression based on the feature engineering such as bag of words [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref8">8</xref>], latent Dirichlet allocation (LDA) [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref10">10</xref>], N-gram [<xref ref-type="bibr" rid="ref11">11</xref>], Linguistic Inquiry and Word Count (LIWC) dictionary [<xref ref-type="bibr" rid="ref12">12</xref>], or their combinations [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. Bag of words, LDA, and N-gram have been widely used in natural language processing (NLP) for feature extraction and have achieved great progress. LIWC can carry out quantitative analysis on the word categories (especially psychological words) of the text content, including the sentiment, emotion, and so on. Emotion extracted by LIWC is often used in the depression detection task. With the development of deep learning in NLP, more and more studies use deep learning models for depression detection. Orabi et al [<xref ref-type="bibr" rid="ref15">15</xref>] proposed a method based on deep learning (convolutional neural network [CNN] and recurrent neural network [RNN]) to detect depression. Gui et al [<xref ref-type="bibr" rid="ref16">16</xref>] proposed a reinforcement learning method based on RNN for depression detection. Although these advanced deep learning based models can extract higher-level semantic information and have achieved great progress, they still lack effective extraction of the emotional semantic information. This may limit the ability of their model because the emotional information may bring effective clues for depression detection, as shown in examples 1 and 2.</p>
        <p>Before introducing our model and to understand our paper more conveniently, we give several definitions of concepts, including high-level emotional semantic information, semantic understanding network (SUN), emotion understanding network (EUN), and dynamic fusion strategy.</p>
        <list list-type="bullet">
          <list-item>
            <p>High-level emotional semantic information denotes the emotional semantic information that is captured by deep learning.</p>
          </list-item>
          <list-item>
            <p>SUN is a deep learning method that is used to capture the contextual semantic information in the text for depression detection.</p>
          </list-item>
          <list-item>
            <p>EUN is a deep learning method that is used to capture the emotional semantic information in the text for depression detection.</p>
          </list-item>
          <list-item>
            <p>Dynamic fusion strategy denotes a fusion strategy that can fuse positive emotional information and negative emotional information dynamically.</p>
          </list-item>
        </list>
        <p>To extract the emotional information effectively, we propose an emotion-based attention network (EAN) for depression detection. Our EAN model mainly contains two modules, including a SUN and an EUN. The SUN module is used to capture the contextual semantic information, which has been widely used in NLP. The EUN module is used to capture the emotional information because the emotional information plays an important role for depression detection as previously mentioned. As shown in <xref ref-type="table" rid="table1">Table 1</xref>, the depression-indicative posts contained more negative words and fewer positive words, and the standard posts contained fewer negative words and more positive words. Thus, we designed the EUN module. The EUN module contains two units, including a positive emotion understanding unit and a negative emotion understanding unit, which are used to extract the positive emotional information and the negative emotional information, respectively. In addition, we propose a dynamic fusion strategy in the EUN module to fuse the positive emotion information and the negative emotion information.</p>
        <p>The main contributions of this paper can be summarized as follows:</p>
        <list list-type="bullet">
          <list-item>
            <p>We propose a new deep learning framework for depression detection. We also design a special module to explicitly extract the high-level emotion information for depression detection in our framework.</p>
          </list-item>
          <list-item>
            <p>We take into consideration the positive emotion information and the negative emotion information simultaneously. At the same time, we apply a dynamic fusion strategy to fuse the positive emotion information and the negative emotion information.</p>
          </list-item>
          <list-item>
            <p>We conduct experiments on the Reddit data set for depression detection. The experiments show our model can get state-of-the-art or comparable performance. The ablation study also verifies the effectiveness of the components proposed in our model.</p>
          </list-item>
        </list>
      </sec>
      <sec>
        <title>Related Work</title>
        <p>In this section, we review the related work about depression detection on social media.</p>
        <p>In recent years, with the development of social media, more and more people are willing to post their thoughts, emotions, or life details on social media, including Reddit, Twitter, and so on. Park et al [<xref ref-type="bibr" rid="ref5">5</xref>] showed that people with depression tend to post information about depression and even treatment on social media. Thus, we can get a lot of valuable information from social media. More and more researchers began to analyze the mental health of the users based on the information from social media. As a result, depression detection based on social media has attracted a lot of attention.</p>
        <p>De Choudhury et al [<xref ref-type="bibr" rid="ref17">17</xref>] collected data from Twitter about the users with depression and the regular user, and combined the difference between their behavior on social media (depressed users manifested as decreased social activities, increased negative emotions and self-concern, a high degree and increased expression of religious thoughts, etc) and established a characteristic model for depression detection. Park et al [<xref ref-type="bibr" rid="ref18">18</xref>] tested for users with depression through social media and conducted semistructured face-to-face interviews with 14 active users. The study concluded that users with depression regarded social media as a platform for social awareness and emotional sharing, while users with nondepression regarded social media as a platform for sharing information. Thus, emotional information is important in the task of detecting depression in social media.</p>
        <p>Most of the existing methods for depression detection are based on feature engineering. LIWC is usually used to extract individual psychological states, such as positive and negative emotions, pronouns, and so on. Therefore, LIWC was often used for the depression detection task [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref12">12</xref>-<xref ref-type="bibr" rid="ref14">14</xref>]. Kang et al [<xref ref-type="bibr" rid="ref19">19</xref>] proposed a multimodal method for depression detection including text analysis, a word-based emoticon analysis, and a support vector machine–based image classifier. The authors applied visual sentiment ontology [<xref ref-type="bibr" rid="ref20">20</xref>] and SentiStrength dictionaries to build a mood lexicon for emoticon analysis to enhance the results of depression detection. Shen et al [<xref ref-type="bibr" rid="ref21">21</xref>] extracted six depression-related feature groups (including social network feature, user profile feature, visual feature, emotional feature, topic-level feature, and domain-specific feature) for depression detection. Hiraga [<xref ref-type="bibr" rid="ref22">22</xref>] extracted linguistic features for depression detection, including character n-grams, token n-grams, and lemmas and selected lemmas. Hussain et al [<xref ref-type="bibr" rid="ref3">3</xref>] developed an application called the Socially Mediated Patient Portal. The application could generate a series of features for depression detection.</p>
        <p>Shneidman [<xref ref-type="bibr" rid="ref23">23</xref>] presented depression that tended to be closely related to suicide. De Choudhury et al [<xref ref-type="bibr" rid="ref24">24</xref>] analyzed Reddit users’ posts on the topic of mental health that later turned to the topic of suicidal thoughts. This turn could be predicted by traits such as self-focus, poor language style, reduced social engagement, and expressions of despair or anxiety. Yates et al [<xref ref-type="bibr" rid="ref25">25</xref>] proposed a neural framework for depression detection, and they presented that self-harm was closely related to depression. The Conference and Labs of Evaluation Forum for Early Risk Prediction (CLEF eRISK) is a public competition about different areas such as health and safety [<xref ref-type="bibr" rid="ref26">26</xref>]. CLEF eRISK 2018 is about the early detection of depression and anorexia [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref27">27</xref>]. CLEF eRISK 2019 is about the severity of symptoms of depression, self-injury, and anorexia [<xref ref-type="bibr" rid="ref28">28</xref>].</p>
        <p>Different from traditional feature engineering-based methods, deep learning methods mostly apply end-to-end models. Yates et al [<xref ref-type="bibr" rid="ref25">25</xref>] proposed a neural framework based on a CNN for depression detection. Orabi et al [<xref ref-type="bibr" rid="ref15">15</xref>] proposed a neural method based on a CNN and RNN for depression detection. Song et al [<xref ref-type="bibr" rid="ref29">29</xref>] proposed a neural network that was named the feature attention network for depression detection. Gui et al [<xref ref-type="bibr" rid="ref16">16</xref>] proposed a reinforcement learning method based on long short-term memory (LSTM) for depression detection. Ray et al [<xref ref-type="bibr" rid="ref30">30</xref>] proposed a multilevel attention network to fuse the features from the multimodal for depression detection.</p>
        <p>According to previous research on depression detection, it can be concluded that the emotional information is important in the task of depression detection. In addition, deep learning can take high-level semantic information into account, but the current deep learning methods for depression detection still lack effective extraction of the emotional semantic information. Thus, we propose a deep learning model to consider the high-level emotional information that is captured by the deep learning method for depression detection, which is named the EAN.</p>
        <p>The structure of this paper is organized as follows. The Introduction section introduced the background and related work. The Methods section shows the details of the proposed model. The Results section gives the experiments in this paper. The Discussion section shows the conclusions and future work.</p>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Data Sets</title>
        <p>As a newly developed social media, Reddit has become a widely popular web-based discussion forum. Reddit users can discuss a variety of topics on this web-based platform anonymously. The topics discussed on the platform can be arranged in more than a million discussion groups. Due to the large amount of discussion text, Reddit attracts many researchers to conduct their studies with the data on the Reddit platform. Pirina and Çöltekin [<xref ref-type="bibr" rid="ref6">6</xref>] built a data set for depression detection based on Reddit, which was named the Reddit data set. The samples in the Reddit data set [<xref ref-type="bibr" rid="ref6">6</xref>] are collected from the Reddit platform. The Reddit data set [<xref ref-type="bibr" rid="ref6">6</xref>] contains 1293 depression-indicative posts and 549 standard posts.</p>
        <p>We preprocessed the Reddit data set, such as removing the stop words. We then counted the occurrence number of each word for the depression-indicative posts and the standard posts. We sorted the words according to the statistics and show the top of the word lists in <xref rid="figure1" ref-type="fig">Figure 1</xref>. We also counted the occurrence number of the positive emotion words and the negative emotion words for the depression-indicative posts and the standard posts. For all of the words, the positive emotion words and the negative emotion words with high frequency of occurrence are also shown in <xref ref-type="boxed-text" rid="box1">Textbox 1</xref>.</p>
        <p>As shown in <xref ref-type="boxed-text" rid="box1">Textbox 1</xref>, from the most commonly used words of the depressed users, we can see that many negative words are also included in the most commonly used words such as depression or fucking. The most common words for nondepressed people are commonly used words in daily life. As can be seen from the list of negative words with high frequency of occurrence used by users with depression, the negative words used by users with depression are more intense than the negative words appearing in the posts of nondepressed users, such as suicide, die, kill, and hate.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>The architecture of the emotion-based attention network model. There are two parts in our model, including a SUN and an EUN. bi-LSTM: bidirectional long short-term memory.</p>
          </caption>
          <graphic xlink:href="medinform_v9i7e28754_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <boxed-text id="box1" position="float">
          <title>Data analysis.</title>
          <p>
            <bold>Depression-indicative posts</bold>
          </p>
          <list list-type="bullet">
            <list-item>
              <p>All text: i’m, like, feel, want, get, know, even, really, people, life, i’ve, one, time, think, would, never, depression, me, can’t, go, going, things, don’t, much, friends, make, good, it, still, could, back, anyone, years, anything, always, every, got, someone, fucking, help, day, see, something, work, ever, need, feeling, everything, talk, year</p>
            </list-item>
            <list-item>
              <p>Positive: friends, good, work, help, better, happy, job, love, hard, friend, family, care, wanted, best, sleep, sure, self, mind, understand, new, mental, hope, social, money, high, remember, working, reason, okay, close, real, together, great, normal, deal, believe, change, enjoy, birthday, honestly, nice, motivation, advice, loved, therapist, happiness, fun, boyfriend, saying, big</p>
            </list-item>
            <list-item>
              <p>Negative: depression, depressed, bad, fucking, nothing, alone, hate, shit, stop, lost, worse, anxiety, fuck, tired, sad, die, suicide, kill, relationship, wrong, pain, suicidal, problems, old, sorry, cry, lonely, therapy, hurt, stupid, constantly, issues, sick, crying, problem, afraid, weird, reddit, hospital, worst, hang, illness, dead, scared, dark, broken, shitty, broke, miserable, died</p>
            </list-item>
          </list>
          <p>
            <bold>Standard posts</bold>
          </p>
          <list list-type="bullet">
            <list-item>
              <p>All text: like, i’m, know, friend, would, feel, really, friends, want, time, get, one, even, said, always, never, told, got, family, go, things, me, think, best, make, mom, going, people, years, talk, also, still, back, something, much, see, say, could, i’ve, dad, tell, since, don’t, started, us, me, it, made, help, parents</p>
            </list-item>
            <list-item>
              <p>Positive: friend, friends, family, best, sister, help, friendship, work, brother, good, new, sure, love, wanted, saying, together, advice, father, close, money, boyfriend, kids, care, hard, better, mad, understand, job, basically, happy, great, deal, child, high, moved, believe, fun, social, mind, baby, conversation, eventually, reason, married, big, change, spend, real, normal, nice</p>
            </list-item>
            <list-item>
              <p>Negative: bad, wrong, nothing, old, hang, problem, stop, hurt, upset, sorry, shit, issues, lost, alone, cut, angry, hate, problems, worse, depression, weird, sick, constantly, anxiety, sad, tired, annoyed, broke, bitch, scared, died, hell, afraid, crying, cancer, toxic, ignore, pregnant, lose, difficult, wait, fault, depressed, horrible, awkward, selfish, reply, fuck, confused, reddit</p>
            </list-item>
          </list>
        </boxed-text>
      </sec>
      <sec>
        <title>Overview of the EAN Model</title>
        <p>In this section, we introduce the proposed model for depression detection briefly, which is called the EAN, as shown in <xref rid="figure1" ref-type="fig">Figure 1</xref>. The proposed EAN model mainly contains two parts, including a SUN and an EUN. The SUN module is used to capture the contextual semantic information in the depression-indicative posts. The EUN module is used to capture the emotional semantic information in the depression-indicative posts. Finally, we concatenated the features captured by the two parts and judged whether the text is depression-indicative or not by the depression detector. We give details on the SUN, the EUN, and the loss function next.</p>
      </sec>
      <sec>
        <title>Semantic Understanding Network</title>
        <p>The SUN was used to capture the contextual semantic information in the text for depression detection. There are three layers in the SUN module, including the word encoding layer, context encoding layer, and attention mechanism (Att) layer. We will introduce these three layers in more detail.</p>
        <sec>
          <title>Word Encoding Layer</title>
          <p>We will introduce the word encoding layer in the SUN module briefly. The input of our task is text. The text can be denoted as w = {<italic>w</italic><sub>1</sub>, <italic>w</italic><sub>2</sub>, ..., <italic>w<sub>n</sub></italic>}, where n denotes the length of the text, and <italic>w<sub>i</sub></italic> denotes the word in the text. In NLP tasks, words are usually mapped to the form of word vectors. Inspired by this, we also encoded every word into a d-dimension word vector. We applied the pretrained Global Vectors for Word Representation (GloVe) [<xref ref-type="bibr" rid="ref31">31</xref>] here. We then can get the textual representation S ∈ <italic>R</italic><sup><italic>n</italic> × <italic>d</italic></sup>, where n is the textual length and d is the dimension of the word vector.</p>
        </sec>
        <sec>
          <title>Context Encoding Layer</title>
          <p>The context encoding layer was used to obtain contextual information. Bidirectional long short-term memory (Bi-LSTM) [<xref ref-type="bibr" rid="ref32">32</xref>] was widely used in NLP tasks to capture the contextual information. Inspired by this, we applied Bi-LSTM in the context encoding layer. Bi-LSTM contains a forward directional LSTM and a backward directional LSTM. The output <inline-graphic xlink:href="medinform_v9i7e28754_fig7.png" xlink:type="simple" mimetype="image"/> of Bi-LSTM contains two parts, including the forward LSTM output <inline-graphic xlink:href="medinform_v9i7e28754_fig8.png" xlink:type="simple" mimetype="image"/> and the backward LSTM output <inline-graphic xlink:href="medinform_v9i7e28754_fig9.png" xlink:type="simple" mimetype="image"/>.</p>
          <p>LSTM was proposed by Hochreiter and Schmidhuber [<xref ref-type="bibr" rid="ref33">33</xref>] and was used to capture the forward information in the text. LSTM cannot capture the backward information; therefore, Bi-LSTM was proposed. LSTM has three gates and one cell, including an input gate <italic>i<sub>t</sub></italic>, a forget gate <italic>f<sub>t</sub></italic>, an output gate <italic>o<sub>t</sub></italic>, and a memory cell <italic>c<sub>t</sub></italic>. The operations of LSTM are as follows.</p>
          <disp-formula>
            <graphic xlink:href="medinform_v9i7e28754_fig10.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </disp-formula>
          <p>where <italic>x<sub>t</sub></italic> is the current input word vector, <inline-graphic xlink:href="medinform_v9i7e28754_fig11.png" xlink:type="simple" mimetype="image"/> means the elementwise multiplication operation, and <italic>σ</italic> means the sigmoid function. <italic>W<sub>f</sub></italic>, <italic>W<sub>i</sub></italic>, <italic>W<sub>c</sub></italic>, and <italic>W<sub>o</sub></italic> represent the parameters that can be trained in the training processing. <italic>h<sub>t</sub></italic> is the hidden state vector. <inline-graphic xlink:href="medinform_v9i7e28754_fig12.png" xlink:type="simple" mimetype="image"/> is the output of LSTM. More details on LSTM can be found in Hochreiter and Schmidhuber [<xref ref-type="bibr" rid="ref33">33</xref>], and the output of Bi-LSTM is H = [<italic>H</italic><sub>1</sub>, <italic>H</italic><sub>2</sub>, ..., <italic>H<sub>n</sub></italic>].</p>
        </sec>
        <sec>
          <title>Attention Mechanism Layer</title>
          <p>The input of the Att layer is H = [<italic>H</italic><sub>1</sub>, <italic>H</italic><sub>2</sub>, ..., <italic>H<sub>n</sub></italic>]. The Att is used to assign higher weights on the important words. We applied the Att to capture the important words in the depression-indicative posts for the depression detection task. The operations of the Att are based on the following equations:</p>
          <disp-formula>
            <graphic xlink:href="medinform_v9i7e28754_fig13.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </disp-formula>
          <p>where <italic>H<sub>i</sub></italic> is the hidden state vector of Bi-LSTM, <italic>w</italic> and <italic>q<sub>i</sub></italic> are the weighted matrices, and <italic>h<sub>att</sub></italic> is the output of the Att.</p>
        </sec>
      </sec>
      <sec>
        <title>Emotion Understanding Network</title>
        <p>Many research papers [<xref ref-type="bibr" rid="ref19">19</xref>-<xref ref-type="bibr" rid="ref21">21</xref>] and their experiments have proven the effectiveness of emotional feature in depression detection tasks. Inspired by this, we considered the high-level emotional semantic information in the depression-indicative posts based on the EUN. The EUN was used to capture the emotional semantic information in the text for depression detection. There are three layers in the EUN module, including the input layer, emotion encoding layer, and emotion fusion layer. We introduce these three layers in more detail in the following sections.</p>
        <sec>
          <title>Input Layer</title>
          <p>In this section, we introduce the inputs of the EUN module. The inputs include a positive emotion part and a negative emotion part. We applied the SenticNet application programming interface to divide the original texts into a positive emotional part and a negative emotional part. These two emotional parts are also mapped into a matrix of word vectors as in the word encoding layer in the SUN module, named <italic>R<sub>pos</sub></italic> and <italic>R<sub>neg</sub></italic>, respectively.</p>
        </sec>
        <sec>
          <title>Emotion Encoding Layer</title>
          <p>The emotion encoding layer is to encode the positive emotional information and the negative emotional information. <italic>R<sub>pos</sub></italic> and <italic>R<sub>neg</sub></italic> act as the inputs of the emotion encoding layer. There are two units in the emotion encoding layer, including the positive emotion understanding unit and the negative emotion understanding unit. These two units are used to capture positive emotional information and negative emotional information, respectively. We also applied Bi-LSTM to capture the contextual emotional information and the Att to capture the important emotions in the text in both units. The operations of Bi-LSTM and the Att are the same as in the SUN module. We can get <italic>h<sub>pos</sub></italic> from the positive emotion understanding unit and <italic>h<sub>neg</sub></italic> from the negative emotion understanding unit.</p>
        </sec>
        <sec>
          <title>Emotion Fusion Layer</title>
          <p>The goal of the emotion fusion layer is to fuse the positive emotional information and the negative emotional information for depression detection. We get the positive emotional information <italic>h<sub>pos</sub></italic> and the negative emotional information <italic>h<sub>neg</sub></italic> from the emotion encoding layer, which can be learned in the training processing. Considering the difference of each text, we designed a dynamic fusion strategy that can dynamically fuse the positive emotional information <italic>h<sub>pos</sub></italic> and the negative emotional information <italic>h<sub>neg</sub></italic>. Inspired by the Att, we designed a random floating-point number θ ∈ [0,1], which can be trained during training. We can get the output <italic>h<sub>emo</sub></italic> of the EUN module with the following formula:</p>
          <disp-formula><italic>h<sub>emo</sub></italic> = θ * <italic>h<sub>pos</sub></italic> + (1 – θ) * <italic>h<sub>neg</sub></italic> <bold>(10)</bold></disp-formula>
        </sec>
      </sec>
      <sec>
        <title>Loss Function</title>
        <p>As previously described, we get the contextual semantic information <italic>h<sub>att</sub></italic> from the SUN module and the emotional semantic information <italic>h<sub>emo</sub></italic> from the EUN module. In this section, we applied a concatenation operation to fuse the contextual semantic information <italic>h<sub>att</sub></italic> and the emotional semantic information <italic>h<sub>emo</sub></italic> as the final representation <italic>f<sub>final</sub></italic>:</p>
        <disp-formula><italic>f<sub>final</sub></italic> = concatenate[<italic>h<sub>att</sub></italic>; <italic>h<sub>emo</sub></italic>] <bold>(11)</bold></disp-formula>
        <p>Accordingly, the final classification decision for depression detection is formulated by the softmax function:</p>
        <disp-formula>y = softmax(W ∙ <italic>f<sub>final</sub></italic> + b) <bold>(12)</bold></disp-formula>
        <p>The cross-entropy loss was used for depression detection in our model. The training goal was to minimize the loss.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Implementation Details and Metrics</title>
        <p>The unit size of Bi-LSTM in our experiments was 64. We applied the pretrained 300-dimension word embedding (GloVe) in the word encoding layer. In addition, the optimization function was Adam, and the batch size was 128. Following Tadesse et al [<xref ref-type="bibr" rid="ref4">4</xref>], we also applied a 10-fold cross validation in our experiments; 90% of posts in the data sets were used as our training set, and the other 10% of posts were used as the testing set.</p>
        <p>We applied the standard metrics, including accuracy, precision, recall, and F1-score, to evaluate the effectiveness of our model for depression detection. F1 is defined as follows:</p>
        <disp-formula>
          <graphic xlink:href="medinform_v9i7e28754_fig14.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </disp-formula>
      </sec>
      <sec>
        <title>Comparison With Existing Methods</title>
        <p>We compared the results of our model with many state-of-the-art methods on the Reddit data set. We compared it with the baselines, including LIWC, LDA, unigram, bigram, LIWC + LDA + unigram, LIWC + LDA + bigram [<xref ref-type="bibr" rid="ref4">4</xref>], LSTM, Bi-LSTM, and Bi-LSTM + Att.</p>
        <list list-type="bullet">
          <list-item>
            <p>LIWC: Tadesse et al [<xref ref-type="bibr" rid="ref4">4</xref>] extracted the linguistic features and the psychological features based on LIWC [<xref ref-type="bibr" rid="ref34">34</xref>] for depression detection.</p>
          </list-item>
          <list-item>
            <p>LDA: Tadesse et al [<xref ref-type="bibr" rid="ref4">4</xref>] extracted 70 dimensional characteristics of the topic based on LDA. It can be helpful in discovering its underlying topic structures for depression detection.</p>
          </list-item>
          <list-item>
            <p>Unigram: Tadesse et al [<xref ref-type="bibr" rid="ref4">4</xref>] extracted 3000 dimensional characteristics based on unigram in term frequency–inverse document frequency (TF–IDF) for depression detection.</p>
          </list-item>
          <list-item>
            <p>Bigram: Tadesse et al [<xref ref-type="bibr" rid="ref4">4</xref>] extracted 2736 dimensional characteristics based on bigram in TF–IDF for depression detection.</p>
          </list-item>
          <list-item>
            <p>LIWC + LDA + unigram: The model is based on the aforementioned characteristics, including LIWC, LDA, and unigram, for depression detection.</p>
          </list-item>
          <list-item>
            <p>LIWC + LDA + bigram: The model is based on the aforementioned characteristics, including LIWC, LDA, and bigram, for depression detection.</p>
          </list-item>
          <list-item>
            <p>LSTM: LSTM was proposed by Hochreiter and Schmidhuber [<xref ref-type="bibr" rid="ref33">33</xref>]. We applied the same word embedding in this paper, and the unit size was 128.</p>
          </list-item>
          <list-item>
            <p>Bi-LSTM: The Bi-LSTM was proposed by Graves et al [<xref ref-type="bibr" rid="ref32">32</xref>]. We applied the same setting and the same word embedding in this paper.</p>
          </list-item>
          <list-item>
            <p>Bi-LSTM + Att: The model is based on Bi-LSTM and the Att.</p>
          </list-item>
          <list-item>
            <p>EAN: This model is proposed in this paper, which considers emotional semantic information based on deep learning.</p>
          </list-item>
        </list>
        <p>As shown in <xref ref-type="table" rid="table2">Table 2</xref>, the results based on deep learning are generally higher than the results based on feature engineering methods. It is because deep learning can capture the higher semantic information of texts. In addition, we can also get the following conclusions.</p>
        <p>The results based on bigram (bigram and LIWC + LDA + bigram) were higher than unigram (unigram and LIWC + LDA + unigram). It can be concluded that contextual information can improve the results of the model. The results based on Bi-LSTM were higher than LSTM. It can be concluded that considering bidirectional contextual semantic information is necessary. The results based on Bi-LSTM + Att were higher than Bi-LSTM; it can be proven that the Att is effective for the depression detection task. The proposed EAN model achieved the best results because we took into consideration both the contextual semantic information and the emotional semantic information.</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Results compared with the existing models.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="200"/>
            <col width="200"/>
            <col width="200"/>
            <col width="200"/>
            <col width="200"/>
            <thead>
              <tr valign="top">
                <td>Model</td>
                <td>Accuracy (%)</td>
                <td>Precision (%)</td>
                <td>Recall (%)</td>
                <td>F1 (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>LIWC<sup>a,b</sup></td>
                <td>70</td>
                <td>74</td>
                <td>71</td>
                <td>72</td>
              </tr>
              <tr valign="top">
                <td>LDA<sup>b,c</sup></td>
                <td>75</td>
                <td>75</td>
                <td>72</td>
                <td>74</td>
              </tr>
              <tr valign="top">
                <td>Unigram<sup>b</sup></td>
                <td>70</td>
                <td>71</td>
                <td>95</td>
                <td>81</td>
              </tr>
              <tr valign="top">
                <td>Bigram<sup>b</sup></td>
                <td>79</td>
                <td>80</td>
                <td>76</td>
                <td>78</td>
              </tr>
              <tr valign="top">
                <td>LIWC + LDA + unigram<sup>b</sup></td>
                <td>78</td>
                <td>84</td>
                <td>79</td>
                <td>81</td>
              </tr>
              <tr valign="top">
                <td>LIWC + LDA + bigram<sup>b</sup></td>
                <td>91</td>
                <td>90</td>
                <td>92</td>
                <td>91</td>
              </tr>
              <tr valign="top">
                <td>LSTM<sup>d</sup></td>
                <td>87.03</td>
                <td>90.30</td>
                <td>91.67</td>
                <td>90.98</td>
              </tr>
              <tr valign="top">
                <td>Bi-LSTM<sup>e</sup></td>
                <td>86.46</td>
                <td>88.08</td>
                <td>95</td>
                <td>91.41</td>
              </tr>
              <tr valign="top">
                <td>Bi-LSTM + Att<sup>f</sup></td>
                <td>88.59</td>
                <td>90.41</td>
                <td>94.96</td>
                <td>92.63</td>
              </tr>
              <tr valign="top">
                <td>EAN<sup>g</sup> (our model)</td>
                <td>91.3</td>
                <td>91.91</td>
                <td>96.15</td>
                <td>93.98</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table2fn1">
              <p><sup>a</sup>LIWC: Linguistic Inquiry and Word Count.</p>
            </fn>
            <fn id="table2fn2">
              <p><sup>b</sup>Indicates that the results are shown in the literature [<xref ref-type="bibr" rid="ref4">4</xref>].</p>
            </fn>
            <fn id="table2fn3">
              <p><sup>c</sup>LDA: latent Dirichlet allocation.</p>
            </fn>
            <fn id="table2fn4">
              <p><sup>d</sup>LSTM: long short-term memory.</p>
            </fn>
            <fn id="table2fn5">
              <p><sup>e</sup>Bi-LSTM: bidirectional long short-term memory.</p>
            </fn>
            <fn id="table2fn6">
              <p><sup>f</sup>Att: attention mechanism.</p>
            </fn>
            <fn id="table2fn7">
              <p><sup>g</sup>EAN: emotion-based attention network.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Detail Analysis</title>
        <p>In this section, we analyze the effectiveness of the two modules (SUN and EUN), the effectiveness of different emotional semantic information, and the effectiveness of the dynamic fusion strategy.</p>
        <sec>
          <title>The Effectiveness of SUN and EUN</title>
          <p>To verify the effectiveness of SUN and EUN, we designed a series of experiments. SUN means the proposed EAN model without the EUN module. EUN means the proposed EAN model without the SUN module. As shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>, the EUN module obtained the worst results. This is because the model only considers the emotional semantic information without the complete semantic information. It verifies the effectiveness of our SUN module. The results of the EAN model were higher than the SUN module, which further verifies the effectiveness of our EUN module.</p>
          <fig id="figure2" position="float">
            <label>Figure 2</label>
            <caption>
              <p>The effectiveness of the SUN and EUN. EAN: emotion-based attention network; EUN: emotion understanding network; SUN: semantic understanding network.</p>
            </caption>
            <graphic xlink:href="medinform_v9i7e28754_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>The Effectiveness of Different Emotional Semantic Information</title>
          <p>To verify the effectiveness of different emotional semantic information, we designed a series of experiments, including without emotion (SUN), without positive emotion (SUN + negative), and without negative emotion (SUN + positive). As shown in <xref rid="figure3" ref-type="fig">Figure 3</xref>, the results of the SUN + positive model and the SUN module were similar. It indicates that positive emotions have less effect on the model. Although the EAN model does not obtain the best recall value, it obtained the best <italic>P</italic> value, Acc value, and F1 value. From the experiments, our proposed EAN model obtained the best result compared to the three aforementioned baseline models. It also verified the effectiveness of each proposed module in our framework.</p>
          <fig id="figure3" position="float">
            <label>Figure 3</label>
            <caption>
              <p>The effectiveness of different emotional semantic information. Acc: accuracy; EAN: emotion-based attention network; P: precision; R: recall; SUN: semantic understanding network.</p>
            </caption>
            <graphic xlink:href="medinform_v9i7e28754_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>The Effectiveness of the Dynamic Fusion Strategy</title>
          <p>To verify the effectiveness of the dynamic fusion strategy, we designed a series of experiments including the EAN model with the concatenate fusion strategy, the EAN model with the fixed fusion strategy, and the EAN model with the dynamic fusion strategy. The EAN (concatenate fusion) model applies the concatenate operation in the emotion fusion strategy. The EAN (fixed fusion) model applies the fixed fusion operation in the emotion fusion layer. The θ in equation 10 is fixed at 0.5. The EAN (dynamic fusion) model is the model proposed in this paper. As shown in <xref rid="figure4" ref-type="fig">Figure 4</xref>, the dynamic fusion method had the best results.</p>
          <p>In this section, we designed a series of experiments to verify the effectiveness of the proposed EAN model, including the two modules in the EAN model, the different emotional semantic information, and the dynamic fusion method.</p>
          <p>Some visualization results of the θ to illustrate the effectiveness of the proposed dynamic fusion strategy intuitively are shown in <xref rid="figure5" ref-type="fig">Figure 5</xref>. As shown in <xref rid="figure5" ref-type="fig">Figure 5</xref>, the examples are both depression-indicative posts. The pie chart indicates the value of the θ in the dynamic fusion strategy. We can see from the results that in the depression-indicative posts, more attention is paid to the negative emotional information.</p>
          <fig id="figure4" position="float">
            <label>Figure 4</label>
            <caption>
              <p>The effectiveness of the dynamic fusion strategy. Acc: accuracy; EAN: emotion-based attention network; P: precision; R: recall.</p>
            </caption>
            <graphic xlink:href="medinform_v9i7e28754_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <fig id="figure5" position="float">
            <label>Figure 5</label>
            <caption>
              <p>The visualization of the θ in the dynamic fusion strategy. GF: girlfriend.</p>
            </caption>
            <graphic xlink:href="medinform_v9i7e28754_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Conclusion</title>
        <p>Depression is now attracting increasing attention from individuals and organizations. With the development of computer technology, some researchers are trying to use computers to automatically identify people who are depressed. In this paper, we proposed an EAN model to explicitly extract the high-level emotion information for the depression detection task. The proposed EAN model consists of the SUN and the EUN. In the proposed model, we took into consideration the positive emotion information and the negative emotion information simultaneously. At the same time, we applied a dynamic fusion strategy to fuse the positive emotion information and the negative emotion information. The experimental results verified that the emotional semantic information is effective in depression detection.</p>
      </sec>
      <sec>
        <title>Future Work</title>
        <p>According to WHO statistics, depression is one of the main causes of suicide in the world. We will focus on the relationship between depression and suicide. We will try to combine suicide detection with depression detection in our future work to improve the performance of both tasks by multitask learning. In addition, the future work will be combined with self-reported depressive symptoms or clinical diagnosis. Hopefully, our study can provide some technical support in the field of health care.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">Att</term>
          <def>
            <p>attention mechanism</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">Bi-LSTM</term>
          <def>
            <p>bidirectional long short-term memory</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">CLEF eRISK</term>
          <def>
            <p>Conference and Labs of Evaluation Forum for Early Risk Prediction</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">CNN</term>
          <def>
            <p>convolutional neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">EAN</term>
          <def>
            <p>emotion-based attention network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">EUN</term>
          <def>
            <p>emotion understanding network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">GloVe</term>
          <def>
            <p>Global Vectors for Word Representation</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">LDA</term>
          <def>
            <p>latent Dirichlet allocation</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">LIWC</term>
          <def>
            <p>Linguistic Inquiry and Word Count</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">LSTM</term>
          <def>
            <p>long short-term memory</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">NLP</term>
          <def>
            <p>natural language processing</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">RNN</term>
          <def>
            <p>recurrent neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb13">SUN</term>
          <def>
            <p>semantic understanding network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb14">TF–IDF</term>
          <def>
            <p>term frequency–inverse document frequency</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb15">WHO</term>
          <def>
            <p>World Health Organization</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This study was partially supported by a grant from the Natural Science Foundation of China (No. 62076046, 61632011, 62006034, 61876031), the Ministry of Education Humanities and Social Science Project (No. 19YJCZH199), State Key Laboratory of Novel Software Technology (Nanjing University; No. KFKT2021B07), and the Fundamental Research Funds for the Central Universities (No. DUT21RC(3)015).</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Friedrich</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Depression is the leading cause of disability around the world</article-title>
          <source>JAMA</source>
          <year>2017</year>
          <month>04</month>
          <day>18</day>
          <volume>317</volume>
          <issue>15</issue>
          <fpage>1517</fpage>
          <pub-id pub-id-type="doi">10.1001/jama.2017.3826</pub-id>
          <pub-id pub-id-type="medline">28418490</pub-id>
          <pub-id pub-id-type="pii">2618635</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="web">
          <article-title>Depression and other common mental disorders: global health estimates</article-title>
          <source>World Health Organization</source>
          <year>2017</year>
          <access-date>2021-06-28</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://apps.who.int/iris/bitstream/handle/10665/254610/WHO-MSD-MER-2017.2-eng.pdf">https://apps.who.int/iris/bitstream/handle/10665/254610/WHO-MSD-MER-2017.2-eng.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hussain</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Satti</surname>
              <given-names>FA</given-names>
            </name>
            <name name-style="western">
              <surname>Afzal</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>WA</given-names>
            </name>
            <name name-style="western">
              <surname>Bilal</surname>
              <given-names>HSM</given-names>
            </name>
            <name name-style="western">
              <surname>Ansaar</surname>
              <given-names>MZ</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmad</surname>
              <given-names>HF</given-names>
            </name>
            <name name-style="western">
              <surname>Hur</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Bang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>GH</given-names>
            </name>
            <name name-style="western">
              <surname>Seung</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Exploring the dominant features of social media for depression detection</article-title>
          <source>J Inf Sci</source>
          <year>2019</year>
          <month>08</month>
          <day>12</day>
          <volume>46</volume>
          <issue>6</issue>
          <fpage>739</fpage>
          <lpage>759</lpage>
          <pub-id pub-id-type="doi">10.1177/0165551519860469</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tadesse</surname>
              <given-names>MM</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Detection of depression-related posts in Reddit social media forum</article-title>
          <source>IEEE Access</source>
          <year>2019</year>
          <volume>7</volume>
          <fpage>44883</fpage>
          <lpage>44893</lpage>
          <pub-id pub-id-type="doi">10.1109/access.2019.2909180</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Park</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Cha</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Cha</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Depressive moods of users portrayed in twitter</article-title>
          <year>2012</year>
          <conf-name>ACM SIGKDD Workshop on Healthcare Informatics (HI-KDD)</conf-name>
          <conf-date>August 12-16, 2012</conf-date>
          <conf-loc>Beijing, China</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pirina</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Çöltekin</surname>
              <given-names>Ç</given-names>
            </name>
          </person-group>
          <article-title>Identifying depression on reddit: The effect of training data</article-title>
          <year>2018</year>
          <conf-name>2018 EMNLP Workshop SMM4H: The 3rd Social Media Mining for Health Applications Workshop &amp; Shared Task</conf-name>
          <conf-date>October 2018</conf-date>
          <conf-loc>Brussels, Belgium</conf-loc>
          <fpage>9</fpage>
          <lpage>12</lpage>
          <pub-id pub-id-type="doi">10.18653/v1/w18-5903</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nadeem</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Identifying depression on twitter</article-title>
          <source>arXiv.</source>
          <comment>Preprint posted online on July 25, 2016
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1607.07384">https://arxiv.org/abs/1607.07384</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Paul</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kalyani</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Basu</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Early detection of signs of anorexia and depression over social media using effective machine learning frameworks</article-title>
          <year>2018</year>
          <conf-name>CLEF 2018</conf-name>
          <conf-date>September 10-14, 2018</conf-date>
          <conf-loc>Avignon, France</conf-loc>
          <fpage>1</fpage>
          <lpage>9</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Maupomé</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Meurs</surname>
              <given-names>MJ</given-names>
            </name>
          </person-group>
          <article-title>Using topic extraction on social media content for the early detection of depression</article-title>
          <year>2018</year>
          <conf-name>CLEF 2018</conf-name>
          <conf-date>September 10-14, 2018</conf-date>
          <conf-loc>Avignon, France</conf-loc>
          <fpage>2125</fpage>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Resnik</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Armstrong</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Claudino</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>VA</given-names>
            </name>
            <name name-style="western">
              <surname>Boyd-Graber</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Beyond LDA: exploring supervised topic modeling for depression-related language in Twitter</article-title>
          <year>2015</year>
          <conf-name>2nd Workshop on Computational Linguistics and Clinical Psychology: From Linguistic Signal to Clinical Reality</conf-name>
          <conf-date>June 5, 2015</conf-date>
          <conf-loc>Denver, Colorado</conf-loc>
          <fpage>99</fpage>
          <lpage>107</lpage>
          <pub-id pub-id-type="doi">10.3115/v1/w15-1212</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Benton</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mitchell</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Hovy</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Multi-task learning for mental health using social media text</article-title>
          <source>arXiv.</source>
          <comment>Preprint posted online on December 10, 2017
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1712.03538">https://arxiv.org/abs/1712.03538</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Coppersmith</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Dredze</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Harman</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Hollingshead</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>From ADHD to SAD: analyzing the language of mental health on twitter through self-reported diagnoses</article-title>
          <year>2015</year>
          <conf-name>2nd Workshop on Computational Linguistics and Clinical Psychology: From Linguistic Signal to Clinical Reality</conf-name>
          <conf-date>June 5, 2015</conf-date>
          <conf-loc>Denver, Colorado</conf-loc>
          <fpage>1</fpage>
          <lpage>10</lpage>
          <pub-id pub-id-type="doi">10.3115/v1/w15-1201</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wolohan</surname>
              <given-names>JT</given-names>
            </name>
            <name name-style="western">
              <surname>Hiraga</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mukherjee</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sayyed</surname>
              <given-names>ZA</given-names>
            </name>
          </person-group>
          <article-title>Detecting linguistic traces of depression in topic restricted text: attending to self-stigmatized depression with NLP</article-title>
          <year>2018</year>
          <conf-name>The First International Workshop on Language Cognition and Computational Models</conf-name>
          <conf-date>August 20, 2018</conf-date>
          <conf-loc>Santa Fe, New Mexico</conf-loc>
          <fpage>11</fpage>
          <lpage>21</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tyshchenko</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Depression and anxiety detection from blog posts data</article-title>
          <source>CORE</source>
          <year>2018</year>
          <access-date>2021-06-28</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://core.ac.uk/download/pdf/237085027.pdf">https://core.ac.uk/download/pdf/237085027.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Orabi</surname>
              <given-names>AH</given-names>
            </name>
            <name name-style="western">
              <surname>Buddhitha</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Orabi</surname>
              <given-names>MH</given-names>
            </name>
            <name name-style="western">
              <surname>Inkpen</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Deep learning for depression detection of Twitter users</article-title>
          <year>2018</year>
          <conf-name>Fifth Workshop on Computational Linguistics and Clinical Psychology: From Keyboard to Clinic</conf-name>
          <conf-date>June 2018</conf-date>
          <conf-loc>New Orleans, LA</conf-loc>
          <fpage>88</fpage>
          <lpage>97</lpage>
          <pub-id pub-id-type="doi">10.18653/v1/w18-0609</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gui</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Ji</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Depression detection on social media with reinforcement learning</article-title>
          <source>Chinese Computational Linguistics 18th China National Conference, CCL 2019, Kunming, China, October 18–20, 2019, Proceedings</source>
          <year>2019</year>
          <publisher-loc>Cham</publisher-loc>
          <publisher-name>Springer</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>De Choudhury</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gamon</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Counts</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Horvitz</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Predicting depression via social media</article-title>
          <year>2013</year>
          <conf-name>Seventh International AAAI Conference on Weblogs and Social Media</conf-name>
          <conf-date>July 8-11, 2013</conf-date>
          <conf-loc>Cambridge, MA</conf-loc>
          <fpage>1</fpage>
          <lpage>10</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Park</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>McDonald</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Cha</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Perception differences between the depressed and non-depressed users in Twitter</article-title>
          <year>2013</year>
          <conf-name>Seventh International AAAI Conference on Weblogs and Social Media</conf-name>
          <conf-date>July 8-11, 2013</conf-date>
          <conf-loc>Cambridge, MA</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kang</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Yoon</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>EY</given-names>
            </name>
          </person-group>
          <article-title>Identifying depressive users in twitter using multimodal analysis</article-title>
          <year>2016</year>
          <conf-name>International Conference on Big Data and Smart Computing (BigComp)</conf-name>
          <conf-date>January 18-20, 2016</conf-date>
          <conf-loc>Hong Kong, China</conf-loc>
          <fpage>231</fpage>
          <lpage>238</lpage>
          <pub-id pub-id-type="doi">10.1109/bigcomp.2016.7425918</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Borth</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ji</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Breuel</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>SF</given-names>
            </name>
          </person-group>
          <article-title>Large-scale visual sentiment ontology and detectors using adjective noun pairs</article-title>
          <source>Proceedings of the 21st ACM International Conference on Multimedia</source>
          <year>2013</year>
          <conf-name>MM '13</conf-name>
          <conf-date>October 21-25, 2013</conf-date>
          <conf-loc>Barcelona, Spain</conf-loc>
          <pub-id pub-id-type="doi">10.1145/2502081.2502282</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shen</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Jia</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Nie</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Feng</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Chua</surname>
              <given-names>TS</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Depression detection via harvesting social media: a multimodal dictionary learning solution</article-title>
          <source>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence</source>
          <year>2017</year>
          <conf-name>IJCAI-17</conf-name>
          <conf-date>August 19-25, 2017</conf-date>
          <conf-loc>Melbourne, Australia</conf-loc>
          <fpage>3838</fpage>
          <lpage>3844</lpage>
          <pub-id pub-id-type="doi">10.24963/ijcai.2017/536</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hiraga</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Predicting depression for Japanese blog text</article-title>
          <year>2017</year>
          <conf-name>ACL 2017, Student Research Workshop</conf-name>
          <conf-date>July 2017</conf-date>
          <conf-loc>Vancouver, Canada</conf-loc>
          <fpage>107</fpage>
          <lpage>113</lpage>
          <pub-id pub-id-type="doi">10.18653/v1/p17-3018</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shneidman</surname>
              <given-names>ES</given-names>
            </name>
          </person-group>
          <article-title>Suicide as psychache</article-title>
          <source>J Nerv Ment Dis</source>
          <year>1993</year>
          <month>03</month>
          <volume>181</volume>
          <issue>3</issue>
          <fpage>145</fpage>
          <lpage>7</lpage>
          <pub-id pub-id-type="doi">10.1097/00005053-199303000-00001</pub-id>
          <pub-id pub-id-type="medline">8445372</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>De Choudhury</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kiciman</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Dredze</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Coppersmith</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Kumar</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Discovering shifts to suicidal ideation from mental health content in social media</article-title>
          <source>Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems</source>
          <year>2016</year>
          <conf-name>CHI '16</conf-name>
          <conf-date>May 7-12, 2016</conf-date>
          <conf-loc>San Jose, CA</conf-loc>
          <fpage>2098</fpage>
          <lpage>2110</lpage>
          <pub-id pub-id-type="doi">10.1145/2858036.2858207</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yates</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Cohan</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Goharian</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Depression and self-harm risk assessment in online forums</article-title>
          <source>arXiv.</source>
          <comment>Preprint posted online on September 6, 2017
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1709.01848">https://arxiv.org/abs/1709.01848</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Losada</surname>
              <given-names>DE</given-names>
            </name>
            <name name-style="western">
              <surname>Crestani</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Parapar</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Jones</surname>
              <given-names>GJF</given-names>
            </name>
            <name name-style="western">
              <surname>Lawless</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gonzalo</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kelly</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Goeuriot</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Mandl</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Cappellato</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Ferro</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>eRISK 2017: CLEF lab on early risk prediction on the internet: experimental foundations</article-title>
          <source>Experimental IR Meets Multilinguality, Multimodality, and Interaction 8th International Conference of the CLEF Association, CLEF 2017, Dublin, Ireland, September 11–14, 2017, Proceedings</source>
          <year>2017</year>
          <publisher-loc>Cham</publisher-loc>
          <publisher-name>Springer</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Trotzek</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Koitka</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Friedrich</surname>
              <given-names>CM</given-names>
            </name>
          </person-group>
          <article-title>Utilizing neural networks and linguistic metadata for early detection of depression indications in text sequences</article-title>
          <source>IEEE Trans Knowledge Data Eng</source>
          <year>2020</year>
          <month>3</month>
          <day>1</day>
          <volume>32</volume>
          <issue>3</issue>
          <fpage>588</fpage>
          <lpage>601</lpage>
          <pub-id pub-id-type="doi">10.1109/tkde.2018.2885515</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Losada</surname>
              <given-names>DE</given-names>
            </name>
            <name name-style="western">
              <surname>Crestani</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Parapar</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Crestani</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Braschler</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Savoy</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rauber</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Müller</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Losada</surname>
              <given-names>DE</given-names>
            </name>
            <name name-style="western">
              <surname>Bürki</surname>
              <given-names>GH</given-names>
            </name>
            <name name-style="western">
              <surname>Cappellato</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Ferro</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Overview of eRisk 2019 early risk prediction on the internet</article-title>
          <source>Experimental IR Meets Multilinguality, Multimodality, and Interaction: 10th International Conference of the CLEF Association, CLEF 2019, Lugano, Switzerland, September 9–12, 2019, Proceedings</source>
          <year>2019</year>
          <publisher-loc>Cham</publisher-loc>
          <publisher-name>Springer</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Song</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>You</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chung</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>JC</given-names>
            </name>
          </person-group>
          <article-title>Feature attention network: interpretable depression detection from social media</article-title>
          <year>2018</year>
          <conf-name>32nd Pacific Asia Conference on Language, Information and Computation</conf-name>
          <conf-date>December 1-3, 2018</conf-date>
          <conf-loc>Hong Kong, China</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ray</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kumar</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Reddy</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Mukherjee</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Garg</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Multi-level attention network using text, audio and video for depression prediction</article-title>
          <source>Proceedings of the 9th International on Audio/Visual Emotion Challenge and Workshop</source>
          <year>2019</year>
          <conf-name>AVEC '19</conf-name>
          <conf-date>October 21, 2019</conf-date>
          <conf-loc>Nice, France</conf-loc>
          <fpage>81</fpage>
          <lpage>88</lpage>
          <pub-id pub-id-type="doi">10.1145/3347320.3357697</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pennington</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Socher</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Manning</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>GloVe: global vectors for word representation</article-title>
          <source>Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing</source>
          <year>2014</year>
          <conf-name>EMNLP '14</conf-name>
          <conf-date>October 2014</conf-date>
          <conf-loc>Doha, Qatar</conf-loc>
          <fpage>1532</fpage>
          <lpage>1543</lpage>
          <pub-id pub-id-type="doi">10.3115/v1/d14-1162</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Graves</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Jaitly</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Mohamed</surname>
              <given-names>AR</given-names>
            </name>
          </person-group>
          <article-title>Hybrid speech recognition with Deep Bidirectional LSTM</article-title>
          <year>2013</year>
          <conf-name>2013 IEEE Workshop on Automatic Speech Recognition and Understanding</conf-name>
          <conf-date>December 8-12, 2013</conf-date>
          <conf-loc>Olomouc, Czech Republic</conf-loc>
          <fpage>273</fpage>
          <lpage>278</lpage>
          <pub-id pub-id-type="doi">10.1109/asru.2013.6707742</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hochreiter</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Schmidhuber</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Long short-term memory</article-title>
          <source>Neural Comput</source>
          <year>1997</year>
          <month>11</month>
          <day>15</day>
          <volume>9</volume>
          <issue>8</issue>
          <fpage>1735</fpage>
          <lpage>80</lpage>
          <pub-id pub-id-type="doi">10.1162/neco.1997.9.8.1735</pub-id>
          <pub-id pub-id-type="medline">9377276</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pennebaker</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Booth</surname>
              <given-names>RJ</given-names>
            </name>
            <name name-style="western">
              <surname>Boyd</surname>
              <given-names>RL</given-names>
            </name>
            <name name-style="western">
              <surname>Francis</surname>
              <given-names>ME</given-names>
            </name>
          </person-group>
          <article-title>Linguistic Inquiry and Word Count: LIWC2015</article-title>
          <source>Pennebaker Conglomerates</source>
          <year>2001</year>
          <access-date>2021-06-28</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://downloads.liwc.net.s3.amazonaws.com/LIWC2015_OperatorManual.pdf">http://downloads.liwc.net.s3.amazonaws.com/LIWC2015_OperatorManual.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
