publications([{ "lang": "fr", "type_publi": "icolcomlec", "doi": "https://doi.org/None", "title": "Microgesture Interaction in Context: demonstrations of the ANR MIC project Interaction par microgeste en contexte : démonstrations du projet ANR MIC", "url": "https://hal.science/hal-05311866", "abstract": "We present demonstrations from the ANR MIC project. MIC aims at studying and promoting microgesture-based interaction by putting it in practice in use situations. The demontrations show interaction techniques based on microgestures or on the combination of microgestures with another modality including haptic feedback as well as mechanisms that support discoverability and learnability of microgestures. The demonstrations illustrate three different contexts of use: 1) Augmented/Virtual Reality because microgesture interaction does not require us to hold any external device and is less physically demanding than mid-air interaction. 2) Car driving because microgestures may be performed in parallel with other tasks, they only require a few seconds and only one hand. 3) Eyes-free interaction (i.e. users with visual impairments) because users can perform microgestures by relying on proprioception only, without looking at their hand.", "authors": { "1": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "2": { "first_name": "Aurélien", "last_name": "Conil" }, "3": { "first_name": "Alix", "last_name": "Goguey" }, "4": { "first_name": "Vincent", "last_name": "Lambert" }, "5": { "first_name": "Laurence", "last_name": "Nigay" }, "6": { "first_name": "Charles", "last_name": "Bailly" }, "7": { "first_name": "Julien", "last_name": "Castet" }, "8": { "first_name": "Michael", "last_name": "Ortega" }, "9": { "first_name": "Zoé", "last_name": "Lacroux" }, "10": { "first_name": "Céline", "last_name": "Lemercier" }, "11": { "first_name": "Pierre-Vincent", "last_name": "Paubel" }, "12": { "first_name": "Sandra", "last_name": "Bardot" }, "13": { "first_name": "Christophe", "last_name": "Jouffrais" }, "14": { "first_name": "Suliac", "last_name": "Lavenant" }, "15": { "first_name": "Sylvain", "last_name": "Malacria" }, "16": { "first_name": "Thomas", "last_name": "Pietrzak" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/CCG+25a/", "id": 1000, "bibtype": "inproceedings", "abbr": "CCG+25a", "address": "Toulouse, France", "date": "2025-11-03", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "IHM'25 - 36e Conférence Internationale Francophone sur l'Interaction Humain-Machine" }, { "lang": "en", "type_publi": "icolcomlec", "doi": "https://doi.org/None", "title": "Studying the Perception of Vibrotactile Haptic Cues on the Finger, Hand and Forearm for Representing Microgestures", "url": "https://inria.hal.science/hal-04680841", "abstract": "We explore the use of vibrotactile haptic cues for representing microgestures. We built a four-axes haptic device for providing vibrotactile cues mapped to all four fingers. We also designed six patterns, inspired by six most commonly studied microgestures. The patterns can be played independently on each axis of the device. We ran an experiment with 36 participants testing three different device locations (fingers, back of the hand, and forearm) for pattern and axis recognition. For all three device locations, participants interpreted the patterns with similar accuracy. We also found that they were better at distinguishing the axes when the device is placed on the fingers. Hand and Forearm device locations remain suitable alternatives but involve a greater trade-off between recognition rate and expressiveness. We report the recognition rates obtained for the different patterns, axes and their combinations per device location. These results per device location are important, as constraints of various kinds, such as hardware, context of use and user activities, influence device location. We discuss this choice of device location by improving literature microgesture-based scenarios with haptic feedback or feedforward.", "authors": { "1": { "first_name": "Suliac", "last_name": "Lavenant" }, "2": { "first_name": "Alix", "last_name": "Goguey" }, "3": { "first_name": "Sylvain", "last_name": "Malacria" }, "4": { "first_name": "Laurence", "last_name": "Nigay" }, "5": { "first_name": "Thomas", "last_name": "Pietrzak" } }, "year": 2024, "uri": "http://iihm.imag.fr/publication/LGM+24a/", "id": 978, "bibtype": "inproceedings", "abbr": "LGM+24a", "address": "Bellevue, WA, United States", "date": "2024-10-21", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "Proceedings of the IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2024)" }, { "lang": "en", "type_publi": "icolcomlec", "doi": "https://doi.org/10.1145/3676523", "title": "Studying the Simultaneous Visual Representation of Microgestures", "url": "https://inria.hal.science/hal-04672513", "abstract": "Hand microgestures are promising for mobile interaction with wearable devices. However, they will not be adopted if practitioners cannot communicate to users the microgestures associated with the commands of their applications. This requires unambiguous representations that simultaneously show the multiple microgestures available to control an application. Using a systematic approach, we evaluate how these representations should be designed and contrast 4 conditions depending on the microgestures (tap-swipe and tap-hold) and fingers (index and index-middle) considered. Based on the results, we design a simultaneous representation of microgestures for a given set of 14 application commands. We then evaluate the usability of the representation for novice users and the suitability of the representation for small screens compared with a baseline. Finally, we formulate 8 recommendations based on the results of all the experiments. In particular, redundant graphical and textual representations of microgestures should only be displayed for novice users.", "authors": { "1": { "first_name": "Vincent", "last_name": "Lambert" }, "2": { "first_name": "Alix", "last_name": "Goguey" }, "3": { "first_name": "Sylvain", "last_name": "Malacria" }, "4": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2024, "uri": "http://iihm.imag.fr/publication/LGM+24b/", "id": 979, "bibtype": "inproceedings", "abbr": "LGM+24b", "address": "Melbourne, Australia", "date": "2024-09-30", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "Proceedings of the ACM International Conference on Mobile Human-Computer Interaction (MobileHCI 2024)" }, { "lang": "en", "type_publi": "icolcomlec", "doi": "https://doi.org/10.1145/3604272", "title": "Studying the Visual Representation of Microgestures", "url": "https://hal.science/hal-04193374", "abstract": "The representations of microgestures are essentials for researchers presenting their results through academic papers and system designers proposing tutorials to novice users. However, those representations remain disparate and inconsistent. As a first attempt to investigate how to best graphically represent microgestures, we created 21 designs, each depicting static and dynamic versions of 4 commonly used microgestures (tap, swipe, flex and hold). We first studied these designs in a quantitative online experiment with 45 participants. We then conducted a qualitative laboratory experiment in Augmented Reality with 16 participants. Based on the results, we provide design guidelines on which elements of a microgesture should be represented and how. In particular, it is recommended to represent the actuator and the trajectory of a microgesture. Also, although preferred by users, dynamic representations are not considered better than their static counterparts for depicting a microgesture and do not necessarily result in a better user recognition", "authors": { "1": { "first_name": "Vincent", "last_name": "Lambert" }, "2": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "3": { "first_name": "Alix", "last_name": "Goguey" }, "4": { "first_name": "Sylvain", "last_name": "Malacria" }, "5": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2023, "uri": "http://iihm.imag.fr/publication/LCG+23a/", "id": 961, "bibtype": "inproceedings", "abbr": "LCG+23a", "address": "Athens, Greece", "date": "2023-09-25", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "ACM International Conference on Mobile Human-Computer Interaction (MobileHCI 2023)" }, { "lang": "en", "publisher": "ACM", "doi": "https://doi.org/10.1145/3366550.3372247", "title": "Reducing Error Aversion to Support Novice-to-Expert Transitions with FastTap", "url": "https://hal.archives-ouvertes.fr/hal-02381584", "abstract": "Expert interaction techniques such as gestures or hotkeys are more efficient than traditional WIMP techniques because it is often faster to recall a command than to navigate to it. However, many users seem to be reluctant to switch to expert interaction. We hypothesize the cause might be the aversion of making errors. To test this, we designed two intermediate modes for the FastTap interaction technique, allowing quick confirmation of what the user has retrieved from memory, and quick adjustment if she has made an error. We investigated the impact of these modes and of various error costs in a controlled study (N=36). We found that participants adopted the intermediate modes, that these modes reduced error rate when error cost was high, and that they did not substantially change selection times. However, while it validates the design of our intermediate modes, we found no evidence of greater switch to memory-based interaction, suggesting that reducing the error rate is not sufficient to promote expert use of techniques.", "authors": { "1": { "first_name": "Alix", "last_name": "Goguey" }, "2": { "first_name": "Sylvain", "last_name": "Malacria" }, "3": { "first_name": "Andy", "last_name": "Cockburn" }, "4": { "first_name": "Carl", "last_name": "Gutwin" } }, "year": 2019, "uri": "http://iihm.imag.fr/publication/GMC+19a/", "pages": "1:1-10", "bibtype": "inproceedings", "id": 908, "abbr": "GMC+19a", "address": "Grenoble, France", "date": "2019-12-10", "type": "Conférences nationales avec comité de lecture sur texte complet", "booktitle": "Actes de la 31e conférence francophone sur l'Interaction Homme-Machine (IHM 2019)", "type_publi": "colcomlec" }]);