publications([{ "lang": "fr", "type_publi": "icolcomlec", "doi": "https://doi.org/None", "title": "Microgesture Interaction in Context: demonstrations of the ANR MIC project Interaction par microgeste en contexte : démonstrations du projet ANR MIC", "url": "https://hal.science/hal-05311866", "abstract": "We present demonstrations from the ANR MIC project. MIC aims at studying and promoting microgesture-based interaction by putting it in practice in use situations. The demontrations show interaction techniques based on microgestures or on the combination of microgestures with another modality including haptic feedback as well as mechanisms that support discoverability and learnability of microgestures. The demonstrations illustrate three different contexts of use: 1) Augmented/Virtual Reality because microgesture interaction does not require us to hold any external device and is less physically demanding than mid-air interaction. 2) Car driving because microgestures may be performed in parallel with other tasks, they only require a few seconds and only one hand. 3) Eyes-free interaction (i.e. users with visual impairments) because users can perform microgestures by relying on proprioception only, without looking at their hand.", "authors": { "1": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "2": { "first_name": "Aurélien", "last_name": "Conil" }, "3": { "first_name": "Alix", "last_name": "Goguey" }, "4": { "first_name": "Vincent", "last_name": "Lambert" }, "5": { "first_name": "Laurence", "last_name": "Nigay" }, "6": { "first_name": "Charles", "last_name": "Bailly" }, "7": { "first_name": "Julien", "last_name": "Castet" }, "8": { "first_name": "Michael", "last_name": "Ortega" }, "9": { "first_name": "Zoé", "last_name": "Lacroux" }, "10": { "first_name": "Céline", "last_name": "Lemercier" }, "11": { "first_name": "Pierre-Vincent", "last_name": "Paubel" }, "12": { "first_name": "Sandra", "last_name": "Bardot" }, "13": { "first_name": "Christophe", "last_name": "Jouffrais" }, "14": { "first_name": "Suliac", "last_name": "Lavenant" }, "15": { "first_name": "Sylvain", "last_name": "Malacria" }, "16": { "first_name": "Thomas", "last_name": "Pietrzak" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/CCG+25a/", "id": 1000, "bibtype": "inproceedings", "abbr": "CCG+25a", "address": "Toulouse, France", "date": "2025-11-03", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "IHM'25 - 36e Conférence Internationale Francophone sur l'Interaction Humain-Machine" }, { "lang": "fr", "type_publi": "icolcomlec", "doi": "https://doi.org/10.1145/3765712.3765725", "title": "Effect of Robotic Modular Interface Assistance Type on Sense of Agency", "url": "https://hal.science/hal-05294835", "abstract": "Robotic modular interfaces, increasingly studied in Human-Computer Interaction, offer assistance to support users in their tasks. However, this assistance can harm the sense of agency (i.e., feeling of control), leading to non-use of the interface or a diminishing sense of responsibility regarding the consequences of users' actions. The impact of robotic modular interface assistance, during a cooperative task, on the user's sense of agency has not yet been studied. In this article, we propose to remedy this through the use of swarm robotic interfaces. We focus on nine levels of assistance, varying system autonomy and module coordination. 
Our study shows that: (1) the higher the autonomy, the lower the sense of agency, (2) coordination seems to have an impact on the sense of agency, and (3) three types of sense of agency emerge depending on the coordination of the modules.", "authors": { "1": { "first_name": "Ophélie", "last_name": "Jobert" }, "2": { "first_name": "Thibaut", "last_name": "Leone" }, "3": { "first_name": "Alix", "last_name": "Goguey" }, "4": { "first_name": "Bruno", "last_name": "Berberian" }, "5": { "first_name": "Julien", "last_name": "Bourgeois" }, "6": { "first_name": "Céline", "last_name": "Coutrix" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/JLG+25a/", "id": 1002, "bibtype": "inproceedings", "abbr": "JLG+25a", "address": "Toulouse, France", "date": "2025-11-03", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "IHM'25 - 36e Conférence Internationale Francophone sur l'Interaction Humain-Machine" }, { "lang": "en", "publisher": "Elsevier", "type_publi": "irevcomlec", "bibtype": "article", "title": "Microgesture + Grasp: A journey from human capabilities to interaction with microgestures", "url": "https://hal.science/hal-04801105", "abstract": "Microgestures, i.e. fast and subtle finger movements, have shown a high potential for ubiquitous interaction. However, work to date either focuses on grasp contexts (holding an object) or on the free-hand context (no held object). These two contexts influence the feasibility of microgestures. Researchers have created sets of microgestures feasible across the entire taxonomy of everyday grasps. However, those sets include a limited number of microgestures as compared to those for the free-hand context, for which microgestures are distinguished according to fine characteristics such as the part of the finger being touched or the number of fingers used. We present the first study on microgesture feasibility across free-hand and grasp contexts. We also study, for the first time, the use of a finer characteristic of a microgesture in a grasp context: surface area. Then, we present a set of rules to determine the feasibility of a microgesture in a given context without the need for time-consuming feasibility studies. In both studies, some microgestures were not feasible across all considered contexts. We therefore explore different ways of defining a set of microgestures compatible with both free-hand and grasp contexts.", "year": 2025, "uri": "http://iihm.imag.fr/publication/CGN25a/", "id": 972, "volume": 195, "abbr": "CGN25a", "authors": { "1": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "2": { "first_name": "Alix", "last_name": "Goguey" }, "3": { "first_name": "Laurence", "last_name": "Nigay" } }, "date": "2025-01-01", "type": "Revues internationales avec comité de lecture", "journal": "International Journal of Human-Computer Studies" }, { "lang": "en", "type_publi": "icolcomlec", "doi": null, "title": "Studying the Perception of Vibrotactile Haptic Cues on the Finger, Hand and Forearm for Representing Microgestures", "url": "https://inria.hal.science/hal-04680841", "abstract": "We explore the use of vibrotactile haptic cues for representing microgestures. We built a four-axis haptic device for providing vibrotactile cues mapped to all four fingers. We also designed six patterns, inspired by the six most commonly studied microgestures. The patterns can be played independently on each axis of the device.
We ran an experiment with 36 participants testing three different device locations (fingers, back of the hand, and forearm) for pattern and axis recognition. For all three device locations, participants interpreted the patterns with similar accuracy. We also found that they were better at distinguishing the axes when the device was placed on the fingers. The hand and forearm device locations remain suitable alternatives but involve a greater trade-off between recognition rate and expressiveness. We report the recognition rates obtained for the different patterns, axes and their combinations per device location. These results per device location are important, as constraints of various kinds, such as hardware, context of use and user activities, influence device location. We discuss the choice of device location by enriching microgesture-based scenarios from the literature with haptic feedback or feedforward.", "authors": { "1": { "first_name": "Suliac", "last_name": "Lavenant" }, "2": { "first_name": "Alix", "last_name": "Goguey" }, "3": { "first_name": "Sylvain", "last_name": "Malacria" }, "4": { "first_name": "Laurence", "last_name": "Nigay" }, "5": { "first_name": "Thomas", "last_name": "Pietrzak" } }, "year": 2024, "uri": "http://iihm.imag.fr/publication/LGM+24a/", "id": 978, "bibtype": "inproceedings", "abbr": "LGM+24a", "address": "Bellevue, WA, United States", "date": "2024-10-21", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "Proceedings of the IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2024)" }, { "lang": "en", "type_publi": "icolcomlec", "doi": "https://doi.org/10.1145/3676523", "title": "Studying the Simultaneous Visual Representation of Microgestures", "url": "https://inria.hal.science/hal-04672513", "abstract": "Hand microgestures are promising for mobile interaction with wearable devices. However, they will not be adopted if practitioners cannot communicate to users the microgestures associated with the commands of their applications. This requires unambiguous representations that simultaneously show the multiple microgestures available to control an application. Using a systematic approach, we evaluate how these representations should be designed and contrast 4 conditions depending on the microgestures (tap-swipe and tap-hold) and fingers (index and index-middle) considered. Based on the results, we design a simultaneous representation of microgestures for a given set of 14 application commands. We then evaluate the usability of the representation for novice users and the suitability of the representation for small screens compared with a baseline. Finally, we formulate 8 recommendations based on the results of all the experiments.
In particular, redundant graphical and textual representations of microgestures should only be displayed for novice users.", "authors": { "1": { "first_name": "Vincent", "last_name": "Lambert" }, "2": { "first_name": "Alix", "last_name": "Goguey" }, "3": { "first_name": "Sylvain", "last_name": "Malacria" }, "4": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2024, "uri": "http://iihm.imag.fr/publication/LGM+24b/", "id": 979, "bibtype": "inproceedings", "abbr": "LGM+24b", "address": "Melbourne, Australia", "date": "2024-09-30", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "Proceedings of the ACM International Conference on Mobile Human-Computer Interaction (MobileHCI 2024)" }, { "lang": "fr", "type_publi": "icolcomlec", "doi": null, "title": "µPoly: a Toolkit to Design Microgesture Interaction", "url": "https://hal.science/hal-04499957", "abstract": "Numerous microgesture recognition systems have been proposed. These systems differ in shape, sensor types and recognition algorithms. However, in the absence of a microgesture event standard and a toolkit for microgesture interaction, it is difficult for an interaction designer to easily and quickly test different recognition systems and microgesture sets. To address this problem, we propose μPoly, a toolkit based on μGlyph, a notation for describing microgestures.", "authors": { "1": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "2": { "first_name": "Aurélien", "last_name": "Conil" }, "3": { "first_name": "Alix", "last_name": "Goguey" }, "4": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2024, "uri": "http://iihm.imag.fr/publication/CCG+24a/", "id": 987, "bibtype": "inproceedings", "abbr": "CCG+24a", "address": "Paris, France", "date": "2024-03-25", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "IHM'24 - 35e Conférence Internationale Francophone sur l'Interaction Humain-Machine" }, { "lang": "en", "publisher": "Elsevier", "type_publi": "irevcomlec", "bibtype": "article", "title": "Automatically Adapting System Pace Towards User Pace - Empirical Studies", "url": "https://hal.science/hal-04411149", "abstract": "• Provides empirical evidence that user preferences for system pace (interface conditions that vary only in the duration of interface timeouts) covary with user pace. • Reveals characteristics of user performance that can be automatically measured by a system as a basis for automatically adapting system pace. • Shows that users converge their rate of interaction towards that of the system.
• Empirically demonstrates that fast-paced users prefer an adaptive system pace to a 'one size fits all' static pace.", "year": 2024, "uri": "http://iihm.imag.fr/publication/CGG+24a/", "id": 960, "volume": 185, "abbr": "CGG+24a", "authors": { "1": { "first_name": "Andy", "last_name": "Cockburn" }, "2": { "first_name": "Alix", "last_name": "Goguey" }, "3": { "first_name": "Carl", "last_name": "Gutwin" }, "4": { "first_name": "Zhe", "last_name": "Chen" }, "5": { "first_name": "Pang", "last_name": "Suwanaposee" }, "6": { "first_name": "Stewart", "last_name": "Dowding" } }, "date": "2024-05-01", "type": "Revues internationales avec comité de lecture", "journal": "International Journal of Human-Computer Studies" }, { "lang": "en", "type_publi": "icolcomlec", "doi": "https://doi.org/10.1145/3500866.3516371", "title": "µGlyph: a Microgesture Notation", "url": "https://hal.science/hal-04026125", "abstract": "In the active field of hand microgestures, microgesture descriptions are typically expressed informally and are accompanied by images, leading to ambiguities and contradictions. An important step in moving the field forward is a rigorous basis for precisely describing, comparing, and analyzing microgestures. Towards this goal, we propose µGlyph, a hybrid notation based on a vocabulary of events inspired by finger biomechanics. First, we investigate the expressiveness of µGlyph by building a database of 118 microgestures extracted from the literature. Second, we experimentally explore the usability of µGlyph. Participants correctly read and wrote µGlyph descriptions 90% of the time, as compared to 46% for conventional descriptions. Third, we present tools that promote µGlyph usage, including a visual editor with LaTeX export. We finally describe how µGlyph can guide research on designing, developing, and evaluating microgesture interaction. Results demonstrate the strong potential of µGlyph to establish a common ground for microgesture research.", "authors": { "1": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "2": { "first_name": "Alix", "last_name": "Goguey" }, "3": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2023, "uri": "http://iihm.imag.fr/publication/CGN23a/", "pages": "3:1-13", "bibtype": "inproceedings", "id": 948, "abbr": "CGN23a", "address": "Hamburg, Germany", "date": "2023-04-23", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems" }, { "lang": "en", "publisher": "IEEE", "doi": "https://doi.org/10.1109/ISMAR59233.2023.00095", "title": "3D Selection in Mixed Reality: Designing a Two-Phase Technique To Reduce Fatigue", "url": "https://hal.science/hal-04297966", "abstract": "Mid-air pointing is widely used for 3D selection in Mixed Reality but leads to arm fatigue. In a first exploratory experiment, we study a two-phase design and compare modalities for each phase: mid-air gestures, eye-gaze and microgestures. Results suggest that eye-gaze and microgestures are good candidates to reduce fatigue and improve interaction speed. We therefore propose two 3D selection techniques: Look&MidAir and Look&Micro. Both techniques include a first phase during which users control a cone directed along their eye-gaze. Using the flexion of the index finger of their non-dominant hand, users pre-select the objects intersecting this cone.
If several objects are pre-selected, a disambiguation phase is performed using direct mid-air touch for Look&MidAir or thumb-to-finger microgestures for Look&Micro. In a second study, we compare both techniques to the standard raycasting technique. Results show that Look&MidAir and Look&Micro perform similarly. However, they are 55% faster, perceived as easier to use and less tiring than the baseline. We discuss how the two techniques could be combined for greater flexibility and for object manipulation after selection.", "authors": { "1": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "2": { "first_name": "Alix", "last_name": "Goguey" }, "3": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2023, "uri": "http://iihm.imag.fr/publication/CGN23b/", "pages": "800-809", "bibtype": "inproceedings", "id": 955, "abbr": "CGN23b", "address": "Sydney, Australia", "date": "2023-10-16", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "2023 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "type_publi": "icolcomlec" }, { "lang": "en", "publisher": "ACM: Association for Computing Machinery, New York", "doi": "https://doi.org/10.1145/3577190.3614131", "title": "µGeT: Multimodal eyes-free text selection technique combining touch interaction and microgestures", "url": "https://hal.science/hal-04353214", "abstract": "We present μGeT, a novel multimodal eyes-free text selection technique. μGeT combines touch interaction with microgestures. μGeT is especially suited for People with Visual Impairments (PVI) by expanding the input bandwidth of touchscreen devices, thus shortening the interaction paths for routine tasks. To do so, μGeT extends touch interaction (left/right and up/down flicks) using two simple microgestures: thumb touching either the index or the middle finger. For text selection, the multimodal technique allows users to directly modify the positioning of the two selection handles and the granularity of text selection. Two user studies, one with 9 PVI and one with 8 blindfolded sighted people, compared μGeT with a common baseline technique (similar to VoiceOver on iPhone). Despite a large variability in performance, the two user studies showed that μGeT is globally faster and yields fewer errors than VoiceOver. A detailed analysis of the interaction trajectories highlights the different strategies adopted by the participants.
Beyond text selection, this research shows the potential of combining touch interaction and microgestures for improving the accessibility of touchscreen devices for PVI.", "authors": { "1": { "first_name": "Gauthier", "last_name": "Faisandaz" }, "2": { "first_name": "Alix", "last_name": "Goguey" }, "3": { "first_name": "Christophe", "last_name": "Jouffrais" }, "4": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2023, "uri": "http://iihm.imag.fr/publication/FGJ+23a/", "pages": "594-603", "bibtype": "inproceedings", "id": 958, "abbr": "FGJ+23a", "address": "Paris, France", "date": "2023-10-09", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "25th ACM International Conference on Multimodal Interaction (ICMI 2023)", "type_publi": "icolcomlec" }, { "lang": "en", "type_publi": "icolcomlec", "doi": "https://doi.org/10.1145/3604272", "title": "Studying the Visual Representation of Microgestures", "url": "https://hal.science/hal-04193374", "abstract": "The representations of microgestures are essential for researchers presenting their results in academic papers and for system designers proposing tutorials to novice users. However, those representations remain disparate and inconsistent. As a first attempt to investigate how to best graphically represent microgestures, we created 21 designs, each depicting static and dynamic versions of 4 commonly used microgestures (tap, swipe, flex and hold). We first studied these designs in a quantitative online experiment with 45 participants. We then conducted a qualitative laboratory experiment in Augmented Reality with 16 participants. Based on the results, we provide design guidelines on which elements of a microgesture should be represented and how. In particular, it is recommended to represent the actuator and the trajectory of a microgesture. Also, although preferred by users, dynamic representations are not considered better than their static counterparts for depicting a microgesture and do not necessarily result in better user recognition.", "authors": { "1": { "first_name": "Vincent", "last_name": "Lambert" }, "2": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "3": { "first_name": "Alix", "last_name": "Goguey" }, "4": { "first_name": "Sylvain", "last_name": "Malacria" }, "5": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2023, "uri": "http://iihm.imag.fr/publication/LCG+23a/", "id": 961, "bibtype": "inproceedings", "abbr": "LCG+23a", "address": "Athens, Greece", "date": "2023-09-25", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "ACM International Conference on Mobile Human-Computer Interaction (MobileHCI 2023)" }, { "lang": "en", "publisher": "Elsevier", "type_publi": "irevcomlec", "bibtype": "article", "title": "Studies and guidelines for two concurrent stroke gestures", "url": "https://hal.science/hal-04031673", "abstract": "This paper investigates thumb-index interaction on touch input devices, and more precisely the potential of two concurrent stroke gestures, i.e. gestures in which two fingers of the same hand concurrently draw one stroke each. We present two fundamental studies, one using such gestures for two-dimensional control, by precisely drawing figures, and the other for command activation, by roughly sketching figures. Results give a first analysis of user performance on 35 gestures of varying complexity, based on the number of turns and symmetries.
All 35 gestures were grouped into six families. From these results, we classify these families and propose new guidelines for designing future mobile interfaces, for instance favoring anchored gestures (forefinger drawing while the thumb remains still on the surface) to increase input bandwidth when forefinger precision is required.", "year": 2023, "uri": "http://iihm.imag.fr/publication/GO23a/", "id": 951, "volume": 170, "abbr": "GO23a", "authors": { "1": { "first_name": "Alix", "last_name": "Goguey" }, "2": { "first_name": "Michael", "last_name": "Ortega" } }, "date": "2023-02-01", "type": "Revues internationales avec comité de lecture", "journal": "International Journal of Human-Computer Studies" }, { "lang": "en", "publisher": "ACM", "doi": "https://doi.org/10.1145/3536221.3556589", "title": "Keep in Touch: Combining Touch Interaction with Thumb-to-Finger µGestures for People with Visual Impairment", "url": "https://hal.archives-ouvertes.fr/hal-03778999", "abstract": "We present a set of 8 thumb-to-finger microgestures (TTF μGestures) that can be used as an additional modality to enrich touch interaction in eyes-free situations. TTF μGestures possess characteristics especially suited for people with visual impairment (PVI). They have never been studied specifically for PVI to improve the accessibility of touchscreen devices. We studied a set of 33 common TTF μGestures to determine which are feasible and usable without seeing while the index finger is touching a surface. We found that the constrained position of the hand and the absence of vision prevent participants from efficiently targeting a specific phalanx. Thus, we propose a set of 8 TTF μGestures (6 taps, 2 swipes) balancing resiliency (i.e., low error-rate) and expressivity (i.e., number of possible inputs): as a dimension combined with the touch modality, it would realistically multiply the touch command space by eight. Within our set of 8 TTF μGestures, we chose a subset of 4 μGestures (2 taps and 2 swipes) and implemented an exploration scenario of an audio-tactile map with a raised-line overlay on a touchscreen and tested it with 7 PVI. Their feedback was positive on the potential benefits of TTF μGestures in enhancing the touch modality and supporting PVI interaction with touchscreen devices.", "authors": { "1": { "first_name": "Gauthier", "last_name": "Faisandaz" }, "2": { "first_name": "Alix", "last_name": "Goguey" }, "3": { "first_name": "Christophe", "last_name": "Jouffrais" }, "4": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2022, "uri": "http://iihm.imag.fr/publication/FGJ+22a/", "pages": "105–116", "bibtype": "inproceedings", "id": 945, "abbr": "FGJ+22a", "address": "Bengaluru (Bangalore), India", "date": "2022-11-07", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "24th ACM International Conference on Multimodal Interaction (ICMI 2022)", "type_publi": "icolcomlec" }, { "lang": "fr", "type_publi": "icolcomlec", "doi": null, "title": "µGlyph: a Graphical Notation to Describe Microgestures", "url": "https://hal.archives-ouvertes.fr/hal-03655062", "abstract": "Hand microgestures define a promising modality for rapid and eyes-free interaction, whether or not an object is held. Although microgestures have been studied in many contexts, e.g. virtual/augmented reality, there is no consensual definition of a microgesture, nor a notation to describe one accurately.
The absence of a reference framework leads to ambiguities in the naming or description of microgestures. We propose µGlyph, a graphical notation to precisely describe hand microgestures at different levels of abstraction. This notation is based on a vocabulary of elementary events from the biomechanics of the hand. Each event is associated with a context of execution as well as optional characteristics, such as the finger that performs the micromovement. We study the descriptive power of the µGlyph notation by positioning it with respect to existing design axes and by describing the most common microgestures in the literature.", "authors": { "1": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "2": { "first_name": "Alix", "last_name": "Goguey" }, "3": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2022, "uri": "http://iihm.imag.fr/publication/CGN22a/", "id": 941, "bibtype": "inproceedings", "abbr": "CGN22a", "address": "Namur, Belgium", "date": "2022-04-05", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "33ème conférence internationale francophone sur l’Interaction Humain-Machine (IHM'22)" }, { "lang": "en", "publisher": "ACM", "doi": "https://doi.org/10.1145/1122445.1122456", "title": "Interaction Pace and User Preferences", "url": "https://hal.archives-ouvertes.fr/hal-03237401", "abstract": "The overall pace of interaction combines the user's pace and the system's pace, and a pace mismatch could impair user preferences (e.g., animations or timeouts that are too fast or slow for the user). Motivated by studies of speech rate convergence, we conducted an experiment to examine whether user preferences for system pace are correlated with user pace. Subjects first completed a series of trials to determine their user pace. They then completed a series of hierarchical drag-and-drop trials in which folders automatically expanded when the cursor hovered for longer than a controlled timeout. Results showed that preferences for timeout values correlated with user pace: slow-paced users preferred long timeouts, and fast-paced users preferred short timeouts. Results indicate potential benefits in moving away from fixed or customisable settings for system pace. Instead, systems could improve preferences by automatically adapting their pace to converge towards that of the user.
", "authors": { "1": { "first_name": "Alix", "last_name": "Goguey" }, "2": { "first_name": "Carl", "last_name": "Gutwin" }, "3": { "first_name": "Zhe", "last_name": "Chen" }, "4": { "first_name": "Pang", "last_name": "Suwanaposee" }, "5": { "first_name": "Andy", "last_name": "Cockburn" } }, "year": 2021, "uri": "http://iihm.imag.fr/publication/GGC+21a/", "pages": "1-14", "bibtype": "inproceedings", "id": 907, "abbr": "GGC+21a", "address": "Yokohama, Japan", "date": "2021-05-08", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "CHI '21: CHI Conference on Human Factors in Computing Systems", "type_publi": "icolcomlec" }, { "lang": "en", "publisher": "Association for Computing Machinery (ACM)", "doi": "https://doi.org/10.1145/3461732", "bibtype": "article", "title": "M[eye]cro: Eye-gaze+Microgestures for Multitasking and Interruptions", "url": "https://hal.archives-ouvertes.fr/hal-03282030", "abstract": "We present M[eye]cro, an interaction technique to select on-screen objects and navigate menus through the synergistic use of eye-gaze and thumb-to-finger microgestures. Thumb-to-finger microgestures are gestures performed with the thumb of a hand onto the fingers of the same hand. The active body of research on microgestures highlights expected properties including speed, availability and eyes-free interaction. Such properties make microgestures a good candidate for multitasking. However, while praised, the state-of-the-art hypothesis stating that microgestures could be beneficial for multitasking has never been quantitatively verified. We study and compare M[eye]cro to a baseline, i.e., a technique based on physical controllers, in a cockpit-based context. This context allows us to design a controlled experiment involving multitasking with low- and high-priority tasks in parallel. Our results show that the performance of the two techniques is similar when participants only perform the selection task. However, M[eye]cro tends to yield better time performance when participants additionally need to treat high-priority tasks in parallel. Results also show that M[eye]cro induces less fatigue and is mostly preferred.", "year": 2021, "uri": "http://iihm.imag.fr/publication/WGN+21a/", "id": 917, "volume": 5, "abbr": "WGN+21a", "authors": { "1": { "first_name": "Jérémy", "last_name": "Wambecke" }, "2": { "first_name": "Alix", "last_name": "Goguey" }, "3": { "first_name": "Laurence", "last_name": "Nigay" }, "4": { "first_name": "Lauren", "last_name": "Dargent" }, "5": { "first_name": "Daniel", "last_name": "Hauret" }, "6": { "first_name": "Stéphanie", "last_name": "Lafon" }, "7": { "first_name": "Jean-Samuel Louis", "last_name": "de Visme" } }, "date": "2021-05-27", "type": "Revues internationales avec comité de lecture", "journal": "Proceedings of the ACM on Human-Computer Interaction", "type_publi": "irevcomlec" }, { "lang": "en", "publisher": "ACM", "doi": "https://doi.org/10.1145/3357236.3395453", "title": "Morphino: A Nature-Inspired Tool for the Design of Shape-Changing Interfaces", "url": "https://hal.archives-ouvertes.fr/hal-02937726", "abstract": "The HCI community has a strong and growing interest in shape-changing interfaces (SCIs) that can offer dynamic affordance.
In this context, there is an increasing need for HCI researchers and designers to form close relationships with disciplines such as robotics and material science in order to truly harness the state of the art in morphing technologies. To help these synergies arise, we present Morphino: a card-based toolkit to inspire shape-changing interface designs. Our cards bring together a collection of morphing mechanisms already established in the multidisciplinary literature and illustrate them through familiar examples from nature. We begin by detailing the design of the cards, based on a review of shape-change in nature; we then report on a series of design sessions conducted to demonstrate their usefulness in generating new ideas and in helping end-users gain a better understanding of the possibilities for shape-changing materials.", "authors": { "1": { "first_name": "Isabel", "last_name": "Qamar" }, "2": { "first_name": "Katarzyna", "last_name": "Stawarz" }, "3": { "first_name": "Simon", "last_name": "Robinson" }, "4": { "first_name": "Alix", "last_name": "Goguey" }, "5": { "first_name": "Céline", "last_name": "Coutrix" }, "6": { "first_name": "Anne", "last_name": "Roudaut" } }, "year": 2020, "uri": "http://iihm.imag.fr/publication/QSR+20a/", "pages": "1943-1958", "bibtype": "inproceedings", "id": 887, "abbr": "QSR+20a", "address": "Eindhoven, Netherlands", "date": "2020-07-06", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "DIS '20: Designing Interactive Systems Conference 2020", "type_publi": "icolcomlec" }, { "lang": "en", "type_publi": "icolcomlec", "doi": null, "title": "Learning Multiple Mappings: an Evaluation of Interference, Transfer, and Retention with Chorded Shortcut Buttons", "url": "https://hal.archives-ouvertes.fr/hal-02961784", "abstract": "Touch interactions with current mobile devices have limited expressiveness. Augmenting devices with additional degrees of freedom can add power to the interaction, and several augmentations have been proposed and tested. However, little is still known about the effects of learning multiple sets of augmented interactions that are mapped to different applications. To better understand whether multiple command mappings can interfere with one another, or affect transfer and retention, we developed a prototype with three pushbuttons on a smartphone case that can be used to provide augmented input to the system. The buttons can be chorded to provide seven possible shortcuts or transient mode switches. We mapped these buttons to three different sets of actions, and carried out a study to see if multiple mappings affect learning and performance, transfer, and retention. Our results show that all of the mappings were quickly learned and there was no reduction in performance with multiple mappings. Transfer to a more realistic task was successful, although with a slight reduction in accuracy. Retention after one week was initially poor, but expert performance was quickly restored.
Our work provides new information about the design and use of chorded buttons for augmenting input in mobile interactions.", "authors": { "1": { "first_name": "Carl", "last_name": "Gutwin" }, "2": { "first_name": "Carl", "last_name": "Hofmeister" }, "3": { "first_name": "David", "last_name": "Ledo" }, "4": { "first_name": "Alix", "last_name": "Goguey" } }, "year": 2020, "uri": "http://iihm.imag.fr/publication/GHL+20a/", "id": 891, "bibtype": "inproceedings", "abbr": "GHL+20a", "address": "Paris, France", "date": "2020-01-01", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "GI 2020" }, { "lang": "en", "type_publi": "icolcomlec", "doi": "https://doi.org/10.1145/3290605.3300503", "title": "PickCells: A Physically Reconfigurable Cell-composed Touchscreen", "abstract": "Touchscreens are the predominant medium for interactions with digital services; however, their current fixed form factor narrows the scope for rich physical interactions by limiting interaction possibilities to a single, planar surface. In this paper we introduce the concept of PickCells, a fully reconfigurable device concept composed of cells that breaks the mould of rigid screens and explores a modular system that affords rich sets of tangible interactions and novel across-device relationships. Through a series of co-design activities – involving HCI experts and potential end-users of such systems – we synthesised a design space aimed at inspiring future research, giving researchers and designers a framework in which to explore modular screen interactions. The design space we propose unifies existing works on modular touch surfaces under a general framework and broadens horizons by opening up unexplored spaces providing new interaction possibilities. In this paper, we present the PickCells concept, a design space of modular touch surfaces, and propose a toolkit for quick scenario prototyping.", "year": 2019, "uri": "http://iihm.imag.fr/publication/GSL+19a/", "pages": "273:1-273:14", "bibtype": "inproceedings", "id": 853, "abbr": "GSL+19a", "authors": { "1": { "first_name": "Alix", "last_name": "Goguey" }, "2": { "first_name": "Cameron", "last_name": "Steer" }, "3": { "first_name": "Andrés", "last_name": "Lucero" }, "4": { "first_name": "Laurence", "last_name": "Nigay" }, "5": { "first_name": "Deepak", "last_name": "Ranjan Sahoo" }, "6": { "first_name": "Céline", "last_name": "Coutrix" }, "7": { "first_name": "Anne", "last_name": "Roudaut" }, "8": { "first_name": "Sriram", "last_name": "Subramanian" }, "9": { "first_name": "Yutaka", "last_name": "Tokuda" }, "10": { "first_name": "Timothy", "last_name": "Neate" }, "11": { "first_name": "Jennifer", "last_name": "Pearson" }, "12": { "first_name": "Simon", "last_name": "Robinson" }, "13": { "first_name": "Matt", "last_name": "Jones" } }, "date": "2019-05-04", "document": "http://iihm.imag.fr/publs/2019/2019chia.pdf", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "CHI 2019, the ACM CHI Conference on Human Factors in Computing Systems" }, { "lang": "en", "type_publi": "icolcomlec", "doi": "https://doi.org/10.1145/3343055.3359703", "title": "BEXHI: A Mechanical Structure for Prototyping Bendable and EXpandable Handheld Interfaces", "url": "http://alixgoguey.fr/BEXHI/", "abstract": "In this paper, we present BEXHI, a new mechanical structure for prototyping expandable and bendable handheld devices.
Many research projects have pushed bendable surfaces from prototypes to commercially viable devices. In the meantime, expandable devices have become a topic of interest, suggesting that such devices are on the horizon. With BEXHI, we provide a structure to explore the combined capabilities of these devices. The structure consists of multiple interwoven units allowing non-porous expandable surfaces to bend. Through an instantiation, we illustrate and discuss how the BEXHI structure allows for the exploration of the combined bend and expansion interaction spaces.", "year": 2019, "uri": "http://iihm.imag.fr/publication/OG19a/", "id": 857, "bibtype": "inproceedings", "abbr": "OG19a", "authors": { "1": { "first_name": "Michael", "last_name": "Ortega" }, "2": { "first_name": "Alix", "last_name": "Goguey" } }, "date": "2019-11-10", "document": "http://iihm.imag.fr/publs/2019/BEXHI.pdf", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "Proceedings of the 2019 ACM International Conference on Interactive Surfaces and Spaces" }, { "lang": "en", "publisher": "ACM", "doi": "https://doi.org/10.1145/3366550.3372247", "title": "Reducing Error Aversion to Support Novice-to-Expert Transitions with FastTap", "url": "https://hal.archives-ouvertes.fr/hal-02381584", "abstract": "Expert interaction techniques such as gestures or hotkeys are more efficient than traditional WIMP techniques because it is often faster to recall a command than to navigate to it. However, many users seem to be reluctant to switch to expert interaction. We hypothesize the cause might be an aversion to making errors. To test this, we designed two intermediate modes for the FastTap interaction technique, allowing quick confirmation of what the user has retrieved from memory, and quick adjustment if she has made an error. We investigated the impact of these modes and of various error costs in a controlled study (N=36). We found that participants adopted the intermediate modes, that these modes reduced error rates when error cost was high, and that they did not substantially change selection times. However, while this validates the design of our intermediate modes, we found no evidence of a greater switch to memory-based interaction, suggesting that reducing the error rate is not sufficient to promote expert use of techniques.", "authors": { "1": { "first_name": "Alix", "last_name": "Goguey" }, "2": { "first_name": "Sylvain", "last_name": "Malacria" }, "3": { "first_name": "Andy", "last_name": "Cockburn" }, "4": { "first_name": "Carl", "last_name": "Gutwin" } }, "year": 2019, "uri": "http://iihm.imag.fr/publication/GMC+19a/", "pages": "1:1-10", "bibtype": "inproceedings", "id": 908, "abbr": "GMC+19a", "address": "Grenoble, France", "date": "2019-12-10", "type": "Conférences nationales avec comité de lecture sur texte complet", "booktitle": "Actes de la 31e conférence francophone sur l'Interaction Homme-Machine (IHM 2019)", "type_publi": "colcomlec" }]);