<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN" "http://www.w3.org/2002/04/xhtml-math-svg/xhtml-math-svg.dtd">
<!-- Fixed: document language was undeclared; XHTML 1.1 takes xml:lang (the
     dc.language meta below already states "en"). -->
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
  <head>
    <meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/>
    <title>Project-Team:PERCEPTION</title>
    <link rel="stylesheet" href="../static/css/raweb.css" type="text/css"/>
    <!-- NOTE(review): description and dc.title repeat the same phrase twice
         ("Highlights of the Year - Highlights of the Year"); looks like a
         generator bug - confirm against the publishing template before
         changing the emitted content. -->
    <meta name="description" content="Highlights of the Year - Highlights of the Year"/>
    <meta name="dc.title" content="Highlights of the Year - Highlights of the Year"/>
    <meta name="dc.subject" content=""/>
    <meta name="dc.publisher" content="INRIA"/>
    <meta name="dc.date" content="(SCHEME=ISO8601) 2017-01"/>
    <meta name="dc.type" content="Report"/>
    <meta name="dc.language" content="(SCHEME=ISO639-1) en"/>
    <meta name="projet" content="PERCEPTION"/>
    <!-- NOTE(review): cdn.mathjax.org was retired in 2017, so this URL may no
         longer resolve; consider migrating to a maintained MathJax CDN
         (e.g. cdnjs or jsDelivr) - confirm with the site maintainers. -->
    <script type="text/javascript" src="https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-MML-AM_CHTML">
      <!--MathJax-->
    </script>
  </head>
  <body>
    <!-- Left-hand table of contents of the activity report; each section is a
         TdmEntry div, and the page currently being viewed gets class
         tdmActPage instead (see the "Highlights of the Year" entry below). -->
    <div class="tdmdiv">
      <div class="logo">
        <a href="http://www.inria.fr">
          <img style="align:bottom; border:none" src="../static/img/icons/logo_INRIA-coul.jpg" alt="Inria"/>
        </a>
      </div>
      <!-- Report header entry: team home page plus the Personnel section -->
      <div class="TdmEntry">
        <div class="tdmentete">
          <a href="uid0.html">Project-Team Perception</a>
        </div>
        <span>
          <a href="uid1.html">Personnel</a>
        </span>
      </div>
      <div class="TdmEntry">
        <a href="./uid3.html">Overall Objectives</a>
      </div>
      <!-- Fixed: the generator emitted literal newline/tab characters
           (&#10;&#9;) inside the href values of every subsection link;
           stripped so the URLs are clean. -->
      <div class="TdmEntry">Research Program<ul><li><a href="uid6.html">Audio-Visual Scene Analysis</a></li><li><a href="uid7.html">Stereoscopic Vision</a></li><li><a href="uid8.html">Audio Signal Processing</a></li><li><a href="uid9.html">Visual Reconstruction With Multiple Color and Depth Cameras</a></li><li><a href="uid10.html">Registration, Tracking and Recognition of People and Actions</a></li></ul></div>
      <!-- Current page marker: "Highlights of the Year" -->
      <div class="tdmActPage">
        <a href="./uid12.html">Highlights of the Year</a>
      </div>
      <!-- Fixed: stripped the stray newline/tab characters (&#10;&#9;) the
           generator emitted inside every subsection href value. -->
      <div class="TdmEntry">New Software and Platforms<ul><li><a href="uid20.html">ECMPR</a></li><li><a href="uid24.html">Mixcam</a></li><li><a href="uid28.html">NaoLab</a></li><li><a href="uid32.html">Stereo matching and recognition library</a></li><li><a href="uid36.html">Platforms</a></li></ul></div>
      <div class="TdmEntry">New Results<ul><li><a href="uid41.html">Audio-Source Localization</a></li><li><a href="uid42.html">Audio-Source Separation</a></li><li><a href="uid43.html">Speech Dereverberation and Noise Reduction</a></li><li><a href="uid44.html">Acoustic-Articulatory Mapping</a></li><li><a href="uid45.html">Visual Tracking of Multiple Persons</a></li><li><a href="uid47.html">Audio-Visual Speaker Tracking and Diarization</a></li><li><a href="uid49.html">Head Pose Estimation and Tracking</a></li><li><a href="uid50.html">Tracking Eye Gaze and of Visual Focus of Attention</a></li><li><a href="uid52.html">Attention-Gated Conditional Random Fields</a></li><li><a href="uid53.html">Pooling Local Virality</a></li><li><a href="uid54.html">Registration of Multiple Point Sets</a></li></ul></div>
      <div class="TdmEntry">Bilateral Contracts and Grants with Industry<ul><li><a href="uid57.html">Bilateral Contracts with Industry</a></li></ul></div>
      <div class="TdmEntry">Partnerships and Cooperations<ul><li><a href="uid59.html">European Initiatives</a></li><li><a href="uid69.html">International Initiatives</a></li><li><a href="uid78.html">International Research Visitors</a></li></ul></div>
      <div class="TdmEntry">Dissemination<ul><li><a href="uid83.html">Promoting Scientific Activities</a></li><li><a href="uid98.html">Teaching - Supervision - Juries</a></li></ul></div>
      <!-- Bibliography entries of the table of contents: a heading div
           followed by links into bibliography.html -->
      <div class="TdmEntry">
        <div>Bibliography</div>
      </div>
      <div class="TdmEntry">
        <ul>
          <li>
            <a id="tdmbibentmajor" href="bibliography.html">Major publications</a>
          </li>
          <li>
            <a id="tdmbibentyear" href="bibliography.html#year">Publications of the year</a>
          </li>
        </ul>
      </div>
    </div>
    <div id="main">
      <!-- Page header: breadcrumb-style external links on the left, download
           links (XML / PDF / ePub) with QR-code images on the right. -->
      <div class="mainentete">
        <div id="head_agauche">
          <small><a href="http://www.inria.fr">
	    
	    Inria
	  </a> | <a href="../index.html">
	    
	    Raweb 
	    2017</a> | <a href="http://www.inria.fr/en/teams/perception">Presentation of the Project-Team PERCEPTION</a> | <a href="http://team.inria.fr/perception">PERCEPTION Web Site
	  </a></small>
        </div>
        <div id="head_adroite">
          <!-- NOTE(review): table used purely for layout by the generator;
               also the first cell of the caption row is empty, so the XML
               icon has no visible label unlike PDF/e-Pub. -->
          <table class="qrcode">
            <tr>
              <td>
                <a href="perception.xml">
                  <img style="align:bottom; border:none" alt="XML" src="../static/img/icons/xml_motif.png"/>
                </a>
              </td>
              <td>
                <a href="perception.pdf">
                  <img style="align:bottom; border:none" alt="PDF" src="IMG/qrcode-perception-pdf.png"/>
                </a>
              </td>
              <td>
                <a href="../perception/perception.epub">
                  <img style="align:bottom; border:none" alt="e-pub" src="IMG/qrcode-perception-epub.png"/>
                </a>
              </td>
            </tr>
            <tr>
              <td/>
              <td>PDF
</td>
              <td>e-Pub
</td>
            </tr>
          </table>
        </div>
      </div>
      <!--FIN du corps du module-->
      <br/>
      <!-- Top previous/up/next navigation; the same bar is repeated at the
           bottom of the page. Access keys: P = previous, U = up, N = next. -->
      <div class="bottomNavigation">
        <div class="tail_aucentre">
          <a href="./uid10.html" accesskey="P"><img style="align:bottom; border:none" alt="previous" src="../static/img/icons/previous_motif.jpg"/> Previous | </a>
          <a href="./uid0.html" accesskey="U"><img style="align:bottom; border:none" alt="up" src="../static/img/icons/up_motif.jpg"/>  Home</a>
          <a href="./uid20.html" accesskey="N"> | Next <img style="align:bottom; border:none" alt="next" src="../static/img/icons/next_motif.jpg"/></a>
        </div>
        <br/>
      </div>
      <div id="textepage">
        <!--DEBUT2 du corps du module-->
        <h2>Section: 
      Highlights of the Year</h2>
        <h3 class="titre3">Highlights of the Year</h3>
        <!-- Fixed: the name attribute is not valid on "a" in XHTML 1.1 (use
             id), and the uid13/uid15 anchors were each emitted twice;
             duplicates removed so fragment targets stay unique. -->
        <ul>
          <li>
            <p class="notaparagraph"><a id="uid13"> </a>In collaboration with several partners, PERCEPTION completed the three year EU STREP project EARS (2014-2017). PERCEPTION contributed to audio-source localization using microphone arrays and to the disambiguation of audio information using vision, in particular to discriminate between speaking and silent persons.</p>
            <p class="notaparagraph">Website: <a href="https://robot-ears.eu/">https://robot-ears.eu/</a></p>
          </li>
          <li>
            <p class="notaparagraph"><a id="uid14"> </a>PERCEPTION started and completed a one year collaboration (December 2016 – November 2017) with <b>Samsung Electronics Digital Media and Communications R&amp;D Center</b>, Seoul, Korea. The topic of this collaboration, fully funded by Samsung, was <i>multi-modal methodologies for human-robot interaction</i> (a central topic of the team) and is part of a strategic partnership between Inria and Samsung Electronics. A follow-up of this collaboration is under preparation and it is planned to start soon (February 2018).</p>
          </li>
          <li>
            <p class="notaparagraph"><a id="uid15"> </a>As an ERC Advanced Grant holder, Radu Horaud was awarded a Proof of Concept grant for his project Vision and Hearing in Action Laboratory (VHIALab). The project will develop software packages enabling companion robots to robustly interact with multiple users.</p>
            <p class="notaparagraph">Website: <a href="https://team.inria.fr/perception/projects/poc-vhialab/">https://team.inria.fr/perception/projects/poc-vhialab/</a></p>
          </li>
        </ul>
        <!-- Fixed: name -> id on the anchors (name is not valid on "a" in
             XHTML 1.1). -->
        <a id="uid16"/>
        <h4 class="titre4">Awards</h4>
        <ul>
          <li>
            <p class="notaparagraph"><a id="uid17"> </a>Israel Dejene Gebru (PhD student) and his co-authors, Christine Evers, Patrick Naylor (both from Imperial College London) and Radu Horaud, received the best paper award at the IEEE Fifth Joint Workshop on Hands-free Speech Communication and Microphone Arrays, San Francisco, USA, 1-3 March 2017, for their paper Audio-visual Tracking by Density Approximation in a Sequential Bayesian Filtering Framework. </p>
          </li>
          <li>
            <p class="notaparagraph"><a id="uid18"> </a>Yutong Ban (PhD student) and his co-authors, Xavier Alameda-Pineda, Fabien Badeig, and Radu Horaud, were among the five finalists of the “Novel Technology Paper Award for Amusement Culture” at the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), Vancouver, Canada, September 2017, for their paper Tracking a Varying Number of People with a Visually-Controlled Robotic Head. </p>
          </li>
        </ul>
        <!-- Fixed: heading was h6 immediately after an h4, skipping a level;
             h5 keeps the hierarchy intact while the titre4 class preserves
             the existing styling. -->
        <h5 class="titre4">
        Best Papers Awards:
      </h5>
        <a href="./bibliography.html#perception-2017-bid27">[41]</a>
        <!-- Fixed: the class attribute contained literal newline/tab
             characters around "noclass"; and dt/dd are only valid as children
             of a dl element, so one now wraps them. -->
        <div class="noclass">
          <dl>
            <dt/>
            <dd>I. <span class="smallcap">Gebru</span>, C. <span class="smallcap">Evers</span>, P. <span class="smallcap">Naylor</span>, R. <span class="smallcap">Horaud</span>.<br/><span class="title-anal">Audio-visual Tracking by Density Approximation in a Sequential Bayesian Filtering Framework</span>, in: <span class="title-mono">IEEE Workshop on Hands-free Speech Communication and Microphone Arrays</span><span>, San Francisco, CA, United States</span>, IEEE Signal Processing Society, March 2017, Best Paper Award. [ <span class="small"><span class="it"> 
DOI : </span>10.1109/HSCMA.2017.7895564</span> ]<br/><a href="https://hal.inria.fr/hal-01452167">https://hal.inria.fr/hal-01452167</a></dd>
          </dl>
        </div>
        <br/>
        <a href="./bibliography.html#perception-2017-bid28">[38]</a>
        <!-- Fixed: the class attribute contained literal newline/tab
             characters around "noclass"; and dt/dd are only valid as children
             of a dl element, so one now wraps them. -->
        <div class="noclass">
          <dl>
            <dt/>
            <dd>Y. <span class="smallcap">Ban</span>, X. <span class="smallcap">Alameda-Pineda</span>, F. <span class="smallcap">Badeig</span>, S. <span class="smallcap">Ba</span>, R. <span class="smallcap">Horaud</span>.<br/><span class="title-anal">Tracking a Varying Number of People with a Visually-Controlled Robotic Head</span>, in: <span class="title-mono">IEEE/RSJ International Conference on Intelligent Robots and Systems</span><span>, Vancouver, Canada</span>, September 2017.<br/><a href="https://hal.inria.fr/hal-01542987">https://hal.inria.fr/hal-01542987</a></dd>
          </dl>
        </div>
        <br/>
      </div>
      <!--FIN du corps du module-->
      <br/>
      <!-- Bottom previous/up/next navigation; same targets and access keys
           as the bar at the top of the page. -->
      <div class="bottomNavigation">
        <div class="tail_aucentre">
          <a href="./uid10.html" accesskey="P"><img style="align:bottom; border:none" alt="previous" src="../static/img/icons/previous_motif.jpg"/> Previous | </a>
          <a href="./uid0.html" accesskey="U"><img style="align:bottom; border:none" alt="up" src="../static/img/icons/up_motif.jpg"/>  Home</a>
          <a href="./uid20.html" accesskey="N"> | Next <img style="align:bottom; border:none" alt="next" src="../static/img/icons/next_motif.jpg"/></a>
        </div>
        <br/>
      </div>
    </div>
  </body>
</html>
