@article{bressolette2017sound,
	  title     = {Sound Selection for Gesture Sonification and Manipulation of Virtual Objects},
  author    = {Benjamin Bressolette and Sébastien Denjean and Vincent Roussarie and Mitsuko Aramaki and Sølvi Ystad and Richard Kronland-Martinet},
  abstract  = {New sensors and technologies, such as microphones,
touchscreens, or infrared sensors, are currently making their
appearance in the automotive sector, introducing new kinds of
Human-Machine Interfaces (HMIs). Interactions with such tools can be
cognitively demanding and therefore unsuitable for driving tasks. For
instance, using a touchscreen with visual feedback while driving can
be dangerous, as it draws the driver's visual attention away from the
road. Furthermore, new technologies in car cockpits change how users
interact with the central system. In particular, touchscreens are
preferred to arrays of buttons to save space and for design purposes.
However, the tactile feedback of buttons is no longer available to
the driver, which makes such interfaces more difficult to manipulate
while driving. Gestures combined with auditory feedback might
therefore constitute an interesting alternative for interacting with
the HMI. Indeed, gestures can be performed without vision, so the
driver's visual attention can remain fully dedicated to the driving
task. The auditory feedback can inform the driver both about the task
performed on the interface and about the gesture itself, which may
compensate for the missing tactile information. As audition is a
relatively unused sense in automotive contexts, gesture sonification
can contribute to reducing cognitive load through this multisensory
approach. Our approach consists of using a virtual object (VO) to
sonify the consequences of the gesture rather than the gesture
itself. This approach is motivated by an ecological point of view:
gestures do not make sound, but their consequences do. In this
experiment, the aim was to identify efficient sound strategies for
transmitting dynamic information about VOs to users through sound.
The swipe gesture was chosen for this purpose, as it is commonly used
in current and new interfaces. We chose two VO parameters to sonify:
the hand-VO distance and the VO velocity. Two kinds of sound
parameters can be used to sonify the VO behavior: spectral or
temporal parameters. Pitch and brightness were tested as spectral
parameters, and amplitude modulation as a temporal parameter.
Performance showed a positive effect of sound compared to a no-sound
condition, revealing the usefulness of sound in accomplishing the
task.},
  journal   = {International Journal of Mechanical and Mechatronics Engineering},
	  volume    = {11},
	  number    = {1},
	  year      = {2017},
  pages     = {147--152},
  ee        = {https://publications.waset.org/pdf/10006330},
  url       = {https://publications.waset.org/vol/121},
  bibsource = {https://publications.waset.org/},
  issn      = {1307-6892},
  publisher = {World Academy of Science, Engineering and Technology},
  index     = {Open Science Index 121, 2017},
}
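
For a concrete feel of the sonification strategies the abstract names, the
following is a minimal sketch of one plausible mapping: pitch (a spectral
parameter) driven by the hand-VO distance, and amplitude modulation (a
temporal parameter) driven by the VO velocity. All function names, ranges,
and the exponential pitch ramp are hypothetical illustrations, not values
taken from the paper.

import numpy as np

# Hypothetical mapping from virtual-object (VO) state to sound parameters,
# sketching the strategy described in the abstract. All ranges are
# illustrative assumptions, not values from the paper.

def distance_to_pitch(distance_m, d_min=0.0, d_max=1.0,
                      f_min=220.0, f_max=880.0):
    """Map hand-VO distance to a carrier frequency in Hz (closer = higher)."""
    d = np.clip(distance_m, d_min, d_max)
    t = 1.0 - (d - d_min) / (d_max - d_min)  # 1 at closest, 0 at farthest
    return f_min * (f_max / f_min) ** t      # exponential ramp, perceptually even

def velocity_to_am_rate(velocity_ms, v_max=2.0, rate_max_hz=16.0):
    """Map VO speed to an amplitude-modulation rate in Hz."""
    v = min(abs(velocity_ms), v_max)
    return rate_max_hz * v / v_max

def render_frame(distance_m, velocity_ms, dur_s=0.2, sr=44100):
    """Render one short audio frame for the current VO state."""
    t = np.arange(int(dur_s * sr)) / sr
    carrier = np.sin(2 * np.pi * distance_to_pitch(distance_m) * t)
    am = 0.5 * (1.0 + np.sin(2 * np.pi * velocity_to_am_rate(velocity_ms) * t))
    return carrier * am

# Example: a VO 30 cm from the hand, moving at 1 m/s after a swipe.
frame = render_frame(0.30, 1.0)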