<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN" "http://www.w3.org/2002/04/xhtml-math-svg/xhtml-math-svg.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
  <head>
    <meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/>
    <title>Project-Team: MULTISPEECH</title>
    <link rel="stylesheet" href="../static/css/raweb.css" type="text/css"/>
    <meta name="description" content="Speech Modeling for Facilitating Oral-Based Communication"/>
    <meta name="dc.title" content="Speech Modeling for Facilitating Oral-Based Communication"/>
    <meta name="dc.subject" content=""/>
    <meta name="dc.publisher" content="INRIA"/>
    <meta name="dc.date" content="(SCHEME=ISO8601) 2019-01"/>
    <meta name="dc.type" content="Report"/>
    <meta name="dc.language" content="(SCHEME=ISO639-1) en"/>
    <meta name="projet" content="MULTISPEECH"/>
    <script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/MathJax.js?config=TeX-MML-AM_CHTML">
      <!-- MathJax (cdn.mathjax.org was retired; cdnjs is the official mirror) -->
    </script>
    <script type="text/javascript" src="../static/js/piwik.js">
      <!-- Piwik JS -->
    </script>
    <noscript>
      <p>
        <img src="https://piwik.inria.fr/matomo.php?idsite=49&amp;rec=1" style="border:0;" alt=""/>
      </p>
      <!-- Piwik Img -->
    </noscript>
  </head>
  <body>
    <div class="tdmdiv">
      <div class="logo">
        <a href="http://www.inria.fr">
          <img style="align:bottom; border:none" src="../static/img/icons/logo_INRIA-coul.jpg" alt="Inria"/>
        </a>
      </div>
      <div class="TdmEntry">
        <div class="tdmentete">
          <a href="uid0.html">Project-Team Multispeech</a>
        </div>
        <span>
          <a href="uid1.html">Team, Visitors, External Collaborators</a>
        </span>
      </div>
      <div class="TdmEntry">
        <a href="./uid3.html">Overall Objectives</a>
      </div>
      <div class="TdmEntry">Research Program<ul><li><a href="uid11.html">Beyond black-box supervised learning</a></li><li><a href="uid15.html">Speech production and perception</a></li><li><a href="uid20.html">Speech in its environment</a></li></ul></div>
      <div class="TdmEntry">Application Domains<ul><li><a href="uid25.html">Introduction</a></li><li><a href="uid26.html">Multimodal Computer Interactions</a></li><li><a href="uid27.html">Annotation and Processing of Spoken Documents and Audio Archives</a></li><li><a href="uid28.html">Aided Communication and Monitoring</a></li><li><a href="uid29.html">Computer Assisted Learning</a></li></ul></div>
      <div class="TdmEntry">
        <a href="./uid31.html">Highlights of the Year</a>
      </div>
      <div class="TdmEntry">New Software and Platforms<ul><li><a href="uid34.html">dnnsep</a></li><li><a href="uid37.html">KATS</a></li><li><a href="uid40.html">SOJA</a></li><li><a href="uid43.html">LORIA-PHON</a></li><li><a href="uid46.html">Dynalips-Player</a></li><li><a href="uid50.html">VisArtico</a></li><li><a href="uid56.html">Xarticulators</a></li><li><a href="uid59.html">DCASE 2019 baseline</a></li></ul></div>
      <div class="TdmEntry">New Results<ul><li><a href="uid65.html">Beyond black-box supervised learning</a></li><li><a href="uid69.html">Speech production and perception</a></li><li><a href="uid81.html">Speech in its environment</a></li></ul></div>
      <div class="TdmEntry">Bilateral Contracts and Grants with Industry<ul><li><a href="uid92.html">Bilateral Contracts with Industry</a></li><li><a href="uid109.html">Bilateral Grants with Industry</a></li></ul></div>
      <div class="TdmEntry">Partnerships and Cooperations<ul><li><a href="uid131.html">Regional Initiatives</a></li><li><a href="uid151.html">National Initiatives</a></li><li><a href="uid273.html">European Initiatives</a></li><li><a href="uid321.html">International Initiatives</a></li><li><a href="uid329.html">International Research Visitors</a></li></ul></div>
      <div class="TdmEntry">Dissemination<ul><li><a href="uid334.html">Promoting Scientific Activities</a></li><li><a href="uid430.html">Teaching - Supervision - Juries</a></li><li><a href="uid508.html">Popularization</a></li></ul></div>
      <div class="TdmEntry">
        <div>Bibliography</div>
      </div>
      <div class="TdmEntry">
        <ul>
          <li>
            <a id="tdmbibentyear" href="bibliography.html">Publications of the year</a>
          </li>
        </ul>
      </div>
    </div>
    <div id="main">
      <div class="mainentete">
        <div id="head_agauche">
          <small><a href="http://www.inria.fr">
	    
	    Inria
	  </a> | <a href="../index.html">
	    
	    Raweb 
	    2019</a> | <a href="http://www.inria.fr/en/teams/multispeech">Presentation of the Project-Team MULTISPEECH</a> | <a href="https://team.inria.fr/multispeech/">MULTISPEECH Web Site
	  </a></small>
        </div>
        <div id="head_adroite">
          <table class="qrcode">
            <tr>
              <td>
                <a href="multispeech.xml">
                  <img style="align:bottom; border:none" alt="XML" src="../static/img/icons/xml_motif.png"/>
                </a>
              </td>
              <td>
                <a href="multispeech.pdf">
                  <img style="align:bottom; border:none" alt="PDF" src="IMG/qrcode-multispeech-pdf.png"/>
                </a>
              </td>
              <td>
                <a href="../multispeech/multispeech.epub">
                  <img style="align:bottom; border:none" alt="e-pub" src="IMG/qrcode-multispeech-epub.png"/>
                </a>
              </td>
            </tr>
            <tr>
              <td/>
              <td>PDF
</td>
              <td>e-Pub
</td>
            </tr>
          </table>
        </div>
      </div>
      <!--FIN du corps du module-->
      <br/>
      <div class="bottomNavigation">
        <div class="tail_aucentre">
          <a href="./uid0.html" accesskey="U"><img style="align:bottom; border:none" alt="up" src="../static/img/icons/up_motif.jpg"/>  Home</a>
          <a href="./uid1.html" accesskey="N"> | Next <img style="align:bottom; border:none" alt="next" src="../static/img/icons/next_motif.jpg"/></a>
        </div>
        <br/>
      </div>
      <!--DEBUT1 du corps du module-->
      <div class="mainpage1">
        <div class="Titrepage1">2019 Project-Team Activity Report
	</div>
        <div class="Projetpage1">
          <div class="ProjetCourtpage1">MULTISPEECH</div>
          <div class="ProjetLongpage1">Speech Modeling for Facilitating Oral-Based Communication<div class="DescriptionTeam"/></div>
        </div>
        <div class="CRpage1">
          <span class="definition">Research centre: </span>
          <a href="http://www.inria.fr/centre/nancy">Nancy - Grand Est</a>
        </div>
        <div class="partner"><span class="definition">In partnership with: </span>CNRS, Université de Lorraine<br/><span class="definition">In collaboration with: </span>Laboratoire lorrain de recherche en informatique et ses applications (LORIA)<br/><br/></div>
        <div class="domainepage1"><span class="definition">Field: </span><a href="http://www.inria.fr/en/domains/Perception-Cognition-and-Interaction">Perception, Cognition and Interaction</a><br/><span class="definition">Theme: </span>Language, Speech and Audio</div>
        <div class="Keywordspage">
          <span class="definition">Keywords: </span>
        </div>
        <div class="Keywordspage1">
          <span class="definition2">
            <a href="/keywords/2019/computing">Computer Science and Digital Science: </a>
          </span>
          <ul>
            <li>A3.4.6. - Neural networks</li>
            <li>A3.4.8. - Deep learning</li>
            <li>A3.5. - Social networks</li>
            <li>A4.8. - Privacy-enhancing technologies</li>
            <li>A5.1.7. - Multimodal interfaces</li>
            <li>A5.7.1. - Sound</li>
            <li>A5.7.3. - Speech</li>
            <li>A5.7.4. - Analysis</li>
            <li>A5.7.5. - Synthesis</li>
            <li>A5.8. - Natural language processing</li>
            <li>A5.9.1. - Sampling, acquisition</li>
            <li>A5.9.2. - Estimation, modeling</li>
            <li>A5.9.3. - Reconstruction, enhancement</li>
            <li>A5.9.5. - Sparsity-aware processing</li>
            <li>A5.10.2. - Perception</li>
            <li>A5.11.2. - Home/building control and interaction</li>
            <li>A6.2.4. - Statistical methods</li>
            <li>A6.3.1. - Inverse problems</li>
            <li>A6.3.5. - Uncertainty Quantification</li>
            <li>A9.2. - Machine learning</li>
            <li>A9.3. - Signal analysis</li>
            <li>A9.4. - Natural language processing</li>
            <li>A9.5. - Robotics</li>
          </ul>
        </div>
        <div class="Keywordspage2">
          <span class="definition2">
            <a href="/keywords/2019/other">Other Research Topics and Application Domains: </a>
          </span>
          <ul>
            <li>B8.1.2. - Sensor networks for smart buildings</li>
            <li>B8.4. - Security and personal assistance</li>
            <li>B9.1.1. - E-learning, MOOC</li>
            <li>B9.5.1. - Computer science</li>
            <li>B9.5.2. - Mathematics</li>
            <li>B9.5.6. - Data science</li>
            <li>B9.6.8. - Linguistics</li>
            <li>B9.6.10. - Digital humanities</li>
          </ul>
        </div>
      </div>
    </div>
  </body>
</html>
