publications([{ "lang": "fr", "type_publi": "icolcomlec", "doi": "https://doi.org/None", "title": "Microgesture Interaction in Context: demonstrations of the ANR MIC project Interaction par microgeste en contexte : démonstrations du projet ANR MIC", "url": "https://hal.science/hal-05311866", "abstract": "We present demonstrations from the ANR MIC project. MIC aims at studying and promoting microgesture-based interaction by putting it in practice in use situations. The demontrations show interaction techniques based on microgestures or on the combination of microgestures with another modality including haptic feedback as well as mechanisms that support discoverability and learnability of microgestures. The demonstrations illustrate three different contexts of use: 1) Augmented/Virtual Reality because microgesture interaction does not require us to hold any external device and is less physically demanding than mid-air interaction. 2) Car driving because microgestures may be performed in parallel with other tasks, they only require a few seconds and only one hand. 3) Eyes-free interaction (i.e. users with visual impairments) because users can perform microgestures by relying on proprioception only, without looking at their hand.", "authors": { "1": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "2": { "first_name": "Aurélien", "last_name": "Conil" }, "3": { "first_name": "Alix", "last_name": "Goguey" }, "4": { "first_name": "Vincent", "last_name": "Lambert" }, "5": { "first_name": "Laurence", "last_name": "Nigay" }, "6": { "first_name": "Charles", "last_name": "Bailly" }, "7": { "first_name": "Julien", "last_name": "Castet" }, "8": { "first_name": "Michael", "last_name": "Ortega" }, "9": { "first_name": "Zoé", "last_name": "Lacroux" }, "10": { "first_name": "Céline", "last_name": "Lemercier" }, "11": { "first_name": "Pierre-Vincent", "last_name": "Paubel" }, "12": { "first_name": "Sandra", "last_name": "Bardot" }, "13": { "first_name": "Christophe", "last_name": "Jouffrais" }, "14": { "first_name": "Suliac", "last_name": "Lavenant" }, "15": { "first_name": "Sylvain", "last_name": "Malacria" }, "16": { "first_name": "Thomas", "last_name": "Pietrzak" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/CCG+25a/", "id": 1000, "bibtype": "inproceedings", "abbr": "CCG+25a", "address": "Toulouse, France", "date": "2025-11-03", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "IHM'25 - 36e Conférence Internationale Francophone sur l'Interaction Humain-Machine" }, { "lang": "en", "type_publi": "these", "title": "Representation of interactions based on hand microgestures", "url": "https://hal.science/tel-05374372", "abstract": "Microgestures are quick and subtle finger movements. They are becoming increasingly popular and appear particularly promising for mobile interaction with wearable devices such as smartwatches and Augmented Reality (AR) headsets. Previous research has mostly focused on what they can be used for, how to detect them and on which microgestures are relevant according to the end-users’ preferences. Whether in research papers or in the latest Apple Watch features, there are images meant to represent how to perform microgestures. These representations are the most prominent way to present microgestures to the end-users but they often remain disparate and ambiguous. Yet, it is still unclear what is characteristic of a “good” representation of a microgesture and how such representations could be used to create usable help interfaces with more than just 4 or 5 microgestures.In this thesis, we propose a two-part approach: (1) determining how to represent the microgestures that can be performed and (2) designing a help interface for an application using multiple commands associated to microgestures.In the first part, we begin by reviewing the related work. In doing so, we determine a representative set of microgestures (hand-free tap, swipe, flex and hold), classify the existing visual cues used in their representations and explore how help interfaces are usually evaluated in Human-Computer Interaction. Then, we investigate the fundamental aspects of single-picture representations of microgestures, i.e. representations with one microgesture per hand shape. For those, we propose 21 “families”, i.e. groups of representations sharing a common design, and we compare them in two complementary experiments. The first is an online form that allow us to rank families and determine which ones work best for each type ofmicrogesture. The second, is a laboratory experiment with an AR headset that allow us to gather qualitative data on the most promising families. With these results combined, we provide a set of design guidelines and a metric to evaluate the explicitness of a single-picture representation of microgestures.In the second part, we first compare different design strategies that can be used to conceive simultaneous representations of microgestures, i.e. representations that show multiple microgestures on the same hand shape. We then design a simultaneous representation of microgestures with their associated commands for a music player application and test its usability in a Wizard of Oz experiment. We also qualitatively compare its performance on small screens, namely smartwartches and smartphones, with a mosaic of single-picture representations of microgestures. With the results of these three experiments, we provide design recommendations specifically thought for simultaneous representations of microgestures. At this point,we are able to say that even though single-picture representations of microgestures are quicker to understand, simultaneous representations of microgestures can still be understood and used by complete novice users after only a few seconds. Based on the results of the previous chapters, we then conceive and compare 3 help interfaces designed for a smartwatch application. This last experiment allows us to determine the advantages and drawbacks of each help interface and suggest that help interfaces for smartwatch applications should use a swipeable carousel of simultaneous representations of microgestures to foster both the discoverability and learning of the available commands. We also propose a first formalization of the confusion between two microgestures. Finally, we present a Unity package for AR applications and a Python package that both make it possible to create representations of microgestures.", "year": 2025, "uri": "http://iihm.imag.fr/publication/L25a/", "bibtype": "phdthesis", "abbr": "L25a", "authors": { "1": { "first_name": "Vincent", "last_name": "Lambert" } }, "date": "2025-11-13", "type": "Thèses et habilitations", "id": 1007 }, { "lang": "en", "type_publi": "icolcomlec", "doi": "https://doi.org/10.1145/3676523", "title": "Studying the Simultaneous Visual Representation of Microgestures", "url": "https://inria.hal.science/hal-04672513", "abstract": "Hand microgestures are promising for mobile interaction with wearable devices. However, they will not be adopted if practitioners cannot communicate to users the microgestures associated with the commands of their applications. This requires unambiguous representations that simultaneously show the multiple microgestures available to control an application. Using a systematic approach, we evaluate how these representations should be designed and contrast 4 conditions depending on the microgestures (tap-swipe and tap-hold) and fingers (index and index-middle) considered. Based on the results, we design a simultaneous representation of microgestures for a given set of 14 application commands. We then evaluate the usability of the representation for novice users and the suitability of the representation for small screens compared with a baseline. Finally, we formulate 8 recommendations based on the results of all the experiments. In particular, redundant graphical and textual representations of microgestures should only be displayed for novice users.", "authors": { "1": { "first_name": "Vincent", "last_name": "Lambert" }, "2": { "first_name": "Alix", "last_name": "Goguey" }, "3": { "first_name": "Sylvain", "last_name": "Malacria" }, "4": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2024, "uri": "http://iihm.imag.fr/publication/LGM+24b/", "id": 979, "bibtype": "inproceedings", "abbr": "LGM+24b", "address": "Melbourne, Australia", "date": "2024-09-30", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "Proceedings of the ACM International Conference on Mobile Human-Computer Interaction (MobileHCI 2024)" }, { "lang": "en", "type_publi": "icolcomlec", "doi": "https://doi.org/10.1145/3604272", "title": "Studying the Visual Representation of Microgestures", "url": "https://hal.science/hal-04193374", "abstract": "The representations of microgestures are essentials for researchers presenting their results through academic papers and system designers proposing tutorials to novice users. However, those representations remain disparate and inconsistent. As a first attempt to investigate how to best graphically represent microgestures, we created 21 designs, each depicting static and dynamic versions of 4 commonly used microgestures (tap, swipe, flex and hold). We first studied these designs in a quantitative online experiment with 45 participants. We then conducted a qualitative laboratory experiment in Augmented Reality with 16 participants. Based on the results, we provide design guidelines on which elements of a microgesture should be represented and how. In particular, it is recommended to represent the actuator and the trajectory of a microgesture. Also, although preferred by users, dynamic representations are not considered better than their static counterparts for depicting a microgesture and do not necessarily result in a better user recognition", "authors": { "1": { "first_name": "Vincent", "last_name": "Lambert" }, "2": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "3": { "first_name": "Alix", "last_name": "Goguey" }, "4": { "first_name": "Sylvain", "last_name": "Malacria" }, "5": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2023, "uri": "http://iihm.imag.fr/publication/LCG+23a/", "id": 961, "bibtype": "inproceedings", "abbr": "LCG+23a", "address": "Athens, Greece", "date": "2023-09-25", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "ACM International Conference on Mobile Human-Computer Interaction (MobileHCI 2023)" }]);