BibTeX: David Kotz papers for project 'simba'.
For more information, visit this web page:
https://www.cs.dartmouth.edu/~kotz/research/project/simba/index.html

@Article{campbell:engagement,
  author =        {Cynthia I. Campbell and Ching-Hua Chen and Sara R. Adams and Asma Asyyed and Ninad R. Athale and Monique B. Does and Saeed Hassanpour and Emily Hichborn and Melanie Jackson-Morris and Nicholas C. Jacobson and Heather K. Jones and David Kotz and Chantal A. Lambert-Harris and Zhiguo Li and Bethany McLeman and Varun Mishra and Catherine Stanger and Geetha Subramaniam and Weiyi Wu and Christopher Zegers and Lisa A. Marsch},
  title =         {{Patient Engagement in a Multimodal Digital Phenotyping Study of Opioid Use Disorder}},
  journal =       {Journal of Medical Internet Research (JMIR)},
  year =          2023,
  month =         jun,
  volume =        25,
  articleno =     {e45556},
  numpages =      14,
  publisher =     {JMIR Publications},
  copyright =     {the authors},
  DOI =           {10.2196/45556},
  PMID =          37310787,
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/campbell-engagement/index.html},
  abstract =      { \emph{Background:} Multiple digital data sources can capture moment-to-moment information to advance a robust understanding of opioid use disorder (OUD) behavior, ultimately creating a digital phenotype for each patient. This information can lead to individualized interventions to improve treatment for OUD.  \par  \emph{Objective:} The aim is to examine patient engagement with multiple digital phenotyping methods among patients receiving buprenorphine medication for OUD.  \par  \emph{Methods:} The study enrolled 65 patients receiving buprenorphine for OUD between June 2020 and January 2021 from 4 addiction medicine programs in an integrated health care delivery system in Northern California. Ecological momentary assessment (EMA), sensor data, and social media data were collected by smartphone, smartwatch, and social media platforms over a 12-week period. Primary engagement outcomes were meeting measures of minimum phone carry ({$\geq$}8 hours per day) and watch wear ({$\geq$}18 hours per day) criteria, EMA response rates, social media consent rate, and data sparsity. Descriptive analyses, bivariate, and trend tests were performed.  \par  \emph{Results:} The participants' average age was 37 years, 47\% of them were female, and 71\% of them were White. On average, participants met phone carrying criteria on 94\% of study days, met watch wearing criteria on 74\% of days, and wore the watch to sleep on 77\% of days. The mean EMA response rate was 70\%, declining from 83\% to 56\% from week 1 to week 12. Among participants with social media accounts, 88\% of them consented to providing data; of them, 55\% of Facebook, 54\% of Instagram, and 57\% of Twitter participants provided data. The amount of social media data available varied widely across participants. No differences by age, sex, race, or ethnicity were observed for any outcomes.  
\par  \emph{Conclusions:} To our knowledge, this is the first study to capture these 3 digital data sources in this clinical population. Our findings demonstrate that patients receiving buprenorphine treatment for OUD had generally high engagement with multiple digital phenotyping data sources, but this was more limited for the social media data.  \par  \emph{International Registered Report Identifier (IRRID):} RR2-10.3389/fpsyt.2022.871916 },
}

@Article{heinz:ema,
  author =        {Michael V. Heinz and George D. Price and Avijit Singh and Sukanya Bhattacharya and Ching-Hua Chen and Asma Asyyed and Monique B. Does and Saeed Hassanpour and Emily Hichborn and David Kotz and Chantal A. Lambert-Harris and Zhiguo Li and Bethany McLeman and Varun Mishra and Catherine Stanger and Geetha Subramaniam and Weiyi Wu and Cynthia I. Campbell and Lisa A. Marsch and Nicholas C. Jacobson},
  title =         {{A longitudinal observational study with ecological momentary assessment and deep learning to predict non-prescribed opioid use, treatment retention, and medication nonadherence among persons receiving medication treatment for opioid use disorder}},
  journal =       {Journal of Substance Use and Addiction Treatment (JSAT)},
  year =          2025,
  month =         mar,
  volume =        173,
  articleno =     209685,
  numpages =      10,
  publisher =     {Elsevier},
  copyright =     {the authors},
  DOI =           {10.1016/j.josat.2025.209685},
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/heinz-ema/index.html},
  abstract =      { \emph{Background:} Despite effective treatments for opioid use disorder (OUD), relapse and treatment drop-out diminish their efficacy, increasing the risks of adverse outcomes, including death. Predicting important outcomes, including non-prescribed opioid use (NPOU) and treatment discontinuation among persons receiving medications for OUD (MOUD) can provide a proactive approach to these challenges. Our study uses ecological momentary assessment (EMA) and deep learning to predict momentary NPOU, medication nonadherence, and treatment retention in MOUD patients.  \par  \emph{Methods:} Study participants included adults receiving MOUD at a large outpatient treatment program. We predicted NPOU (EMA-based), medication nonadherence (Electronic Health Record [EHR]- and EMA-based), and treatment retention (EHR-based) using context-sensitive EMAs (e.g., stress, pain, social setting). We used recurrent deep learning models with 7-day sliding windows to predict the next-day outcomes, using Area Under the ROC Curve (AUC) for assessment. We employed SHapley additive ExPlanations (SHAP) to understand feature latency and importance.  \par  \emph{Results:} Participants comprised 62 adults with 14,322 observations. Model performance varied across EMA subtypes and outcomes with AUCs spanning 0.59-0.97. Recent substance use was the best performing predictor for EMA-based NPOU (AUC{$=$}0.97) and medication nonadherence (AUC{$=$}0.68); life-contextual factors performed best for EHR-based medication nonadherence (AUC{$=$}0.89) and retention (AUC{$=$}0.80). SHAP revealed varying latencies between predictors and outcomes.  \par  \emph{Conclusions:} Findings support the effectiveness of EMA and deep learning for forecasting actionable outcomes in persons receiving MOUD. These insights will enable the development of personalized dynamic risk profiles and just-in-time adaptive interventions (JITAIs) to mitigate high-risk OUD outcomes. },
}

@Misc{hong:receptivity-thesis,
  author =        {Sarah Hong},
  title =         {{Exploring the Relationship Between Intrinsic Motivation and Receptivity to mHealth Interventions}},
  school =        {Dartmouth Computer Science},
  year =          2021,
  month =         jun,
  copyright =     {the author},
  address =       {Hanover, NH},
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/hong-receptivity-thesis/index.html},
  note =          {Undergraduate Thesis},
  abstract =      {Recent research in mHealth has shown the promise of Just-in-Time Adaptive Interventions (JITAIs). JITAIs aim to deliver the right type and amount of support at the right time. Choosing the right delivery time involves determining a user's state of receptivity, that is, the degree to which a user is willing to accept, process, and use the intervention provided.  \par  Although past work on generic phone notifications has found evidence that users are more likely to respond to notifications with content they view as useful, there is no existing research on whether users' intrinsic motivation for the underlying topic of mHealth interventions affects their receptivity. In this work, we explore whether relationships exist between intrinsic motivation and receptivity across topics and within topics for mHealth interventions. To this end, we conducted a study with 20 participants over 3 weeks, where participants received interventions about mental health, COVID-19, physical activity, and diet \& nutrition. The interventions were delivered by the chatbot-based iOS app called Elena+, and via the MobileCoach platform.  \par  Our exploratory analysis found that significant differences in mean intrinsic motivation scores across topics were not associated with differences in mean receptivity metrics across topics. We also found that positive relationships exist between intrinsic motivation measures and receptivity for interventions about a topic.},
}

@Article{koch:car-receptivity,
  author =        {Kevin Koch and Varun Mishra and Shu Liu and Thomas Berger and Elgar Fleisch and David Kotz and Felix Wortmann},
  title =         {{When Do Drivers Interact with In-vehicle Well-being Interventions? An Exploratory Analysis of a Longitudinal Study on Public Roads}},
  journal =       {Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT)},
  year =          2021,
  month =         mar,
  volume =        5,
  number =        1,
  articleno =     19,
  numpages =      30,
  publisher =     {ACM},
  copyright =     {ACM},
  DOI =           {10.1145/3448116},
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/koch-car-receptivity/index.html},
  abstract =      {Recent developments of novel in-vehicle interventions show the potential to transform the otherwise routine and mundane task of commuting into opportunities to improve the drivers' health and well-being. Prior research has explored the effectiveness of various in-vehicle interventions and has identified moments in which drivers could be interruptible to interventions. All the previous studies, however, were conducted in either simulated or constrained real-world driving scenarios on a pre-determined route. In this paper, we take a step forward and evaluate when drivers interact with in-vehicle interventions in unconstrained free-living conditions.  \par  To this end, we conducted a two-month longitudinal study with 10 participants, in which each participant was provided with a study car for their daily driving needs. We delivered two in-vehicle interventions - each aimed at improving affective well-being - and simultaneously recorded the participants' driving behavior. In our analysis, we found that several pre-trip characteristics (like trip length, traffic flow, and vehicle occupancy) and the pre-trip affective state of the participants had significant associations with whether the participants started an intervention or canceled a started intervention. Next, we found that several in-the-moment driving characteristics (like current road type, past average speed, and future brake behavior) showed significant associations with drivers' responsiveness to the intervention. Further, we identified several driving behaviors that "negated" the effectiveness of interventions and highlight the potential of using such "negative" driving characteristics to better inform intervention delivery. Finally, we compared trips with and without intervention and found that both interventions employed in our study did not have a negative effect on driving behavior. 
Based on our analyses, we provide solid recommendations on how to deliver interventions to maximize responsiveness and effectiveness and minimize the burden on the drivers.},
}

@Article{kramer:ally1,
  author =        {Jan-Niklas Kramer and Florian K{\"{u}}nzler and Varun Mishra and Bastien Presset and David Kotz and Shawna Smith and Urte Scholz and Tobias Kowatsch},
  title =         {{Investigating Intervention Components and Exploring States of Receptivity for a Smartphone App to Promote Physical Activity: Protocol of a Microrandomized Trial}},
  journal =       {JMIR Research Protocols},
  year =          2019,
  month =         jan,
  volume =        8,
  number =        1,
  articleno =     {e11540},
  numpages =      17,
  publisher =     {JMIR Publications},
  copyright =     {the authors},
  DOI =           {10.2196/11540},
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/kramer-ally1/index.html},
  abstract =      {Background: Smartphones enable the implementation of just-in-time adaptive interventions (JITAIs) that tailor the delivery of health interventions over time to user- and time-varying context characteristics. Ideally, JITAIs include effective intervention components, and delivery tailoring is based on effective moderators of intervention effects. Using machine learning techniques to infer each user's context from smartphone sensor data is a promising approach to further enhance tailoring. \par  Objective: The primary objective of this study is to quantify main effects, interactions, and moderators of 3 intervention components of a smartphone-based intervention for physical activity. The secondary objective is the exploration of participants' states of receptivity, that is, situations in which participants are more likely to react to intervention notifications through collection of smartphone sensor data. \par  Methods: In 2017, we developed the Assistant to Lift your Level of activitY (Ally), a chatbot-based mobile health intervention for increasing physical activity that utilizes incentives, planning, and self-monitoring prompts to help participants meet personalized step goals. We used a microrandomized trial design to meet the study objectives. Insurees of a large Swiss insurance company were invited to use the Ally app over a 12-day baseline and a 6-week intervention period. Upon enrollment, participants were randomly allocated to either a financial incentive, a charity incentive, or a no incentive condition. Over the course of the intervention period, participants were repeatedly randomized on a daily basis to either receive prompts that support self-monitoring or not and on a weekly basis to receive 1 of 2 planning interventions or no planning. Participants completed a Web-based questionnaire at baseline and postintervention follow-up. \par  Results: Data collection was completed in January 2018. 
In total, 274 insurees (mean age 41.73 years; 57.7\% [158/274] female) enrolled in the study and installed the Ally app on their smartphones. Main reasons for declining participation were having an incompatible smartphone (37/191, 19.4\%) and collection of sensor data (35/191, 18.3\%). Step data are available for 227 (82.8\%, 227/274) participants, and smartphone sensor data are available for 247 (90.1\%, 247/274) participants. \par  Conclusions: This study describes the evidence-based development of a JITAI for increasing physical activity. If components prove to be efficacious, they will be included in a revised version of the app that offers scalable promotion of physical activity at low cost. \par  Trial Registration: ClinicalTrials.gov NCT03384550; https://clinicaltrials.gov/ct2/show/NCT03384550 (Archived by WebCite at http://www.webcitation.org/74IgCiK3d) \par  International Registered Report Identifier (IRRID): DERR1-10.2196/11540},
}

@Article{kramer:step-goals,
  author =        {Jan-Niklas Kramer and Florian K{\"{u}}nzler and Varun Mishra and Shawna N. Smith and David Kotz and Urte Scholz and Elgar Fleisch and Tobias Kowatsch},
  title =         {{Which Components of a Smartphone Walking App Help Users to Reach Personalized Step Goals? Results From an Optimization Trial}},
  journal =       {Annals of Behavioral Medicine},
  year =          2020,
  month =         jul,
  volume =        54,
  number =        7,
  pages =         {518--528},
  publisher =     {Oxford University Press},
  copyright =     {the authors},
  DOI =           {10.1093/abm/kaaa002},
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/kramer-step-goals/index.html},
  note =          {Published 17 March 2020},
  abstract =      {Background: The Assistant to Lift your Level of activitY (Ally) app is a smartphone application that combines financial incentives with chatbot-guided interventions to encourage users to reach personalized daily step goals. \par  Purpose: To evaluate the effects of incentives, weekly planning, and daily self-monitoring prompts that were used as intervention components as part of the Ally app. \par  Methods: We conducted an 8 week optimization trial with n {$=$} 274 insurees of a health insurance company in Switzerland. At baseline, participants were randomized to different incentive conditions (cash incentives vs. charity incentives vs. no incentives). Over the course of the study, participants were randomized weekly to different planning conditions (action planning vs. coping planning vs. no planning) and daily to receiving or not receiving a self-monitoring prompt. Primary outcome was the achievement of personalized daily step goals. \par  Results: Study participants were more active and healthier than the general Swiss population. Daily cash incentives increased step-goal achievement by 8.1\%, 95\% confidence interval (CI): [2.1, 14.1] and, only in the no-incentive control group, action planning increased step-goal achievement by 5.8\%, 95\% CI: [1.2, 10.4]. Charity incentives, self-monitoring prompts, and coping planning did not affect physical activity. Engagement with planning interventions and self-monitoring prompts was low and 30\% of participants stopped using the app over the course of the study. \par  Conclusions: Daily cash incentives increased physical activity in the short term. Planning interventions and self-monitoring prompts require revision before they can be included in future versions of the app. Selection effects and engagement can be important challenges for physical-activity apps. \par  Clinical Trial Information: This study was registered on ClinicalTrials.gov, NCT03384550.},
}

@InProceedings{kunzler:ally-poster,
  author =        {Florian K{\"{u}}nzler and Jan-Niklas Kramer and Varun Mishra and Bastien Presset and Shawna N. Smith and David Kotz and Urte Scholz and Elgar Fleisch and Tobias Kowatsch},
  title =         {{Ally: A Smartphone-based Physical Activity Intervention}},
  booktitle =     {{CSS Health Insurance meets CDHI Event}},
  year =          2017,
  month =         dec,
  numpages =      1,
  copyright =     {the authors},
  address =       {Lucerne, Switzerland},
  DOI =           {10.13140/RG.2.2.13559.93605},
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/kunzler-ally-poster/index.html},
  abstract =      {No behavior has an impact on human health as great as physical activity (PA). We therefore developed Ally, a smartphone-based 6-week PA intervention. Ally seeks to exploit the ubiquity and sensing capabilities of mobile phones to adapt the provision of PA interventions to the context of the user. In this research we investigate the following research questions: (1) What are effective components of Ally, a mHealth physical activity intervention? and (2) Can mobile sensor data predict opportune moments for interventions?},
}

@Article{kunzler:receptivity,
  author =        {Florian K{\"{u}}nzler and Varun Mishra and Jan-Niklas Kramer and David Kotz and Elgar Fleisch and Tobias Kowatsch},
  title =         {{Exploring the State-of-Receptivity for mHealth Interventions}},
  journal =       {Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT) (Ubicomp)},
  year =          2019,
  month =         dec,
  volume =        3,
  number =        4,
  articleno =     140,
  numpages =      27,
  publisher =     {ACM},
  copyright =     {ACM},
  DOI =           {10.1145/3369805},
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/kunzler-receptivity/index.html},
  abstract =      {Recent advancements in sensing techniques for mHealth applications have led to successful development and deployments of several mHealth intervention designs, including Just-In-Time Adaptive Interventions (JITAI). JITAIs show great potential because they aim to provide the right type and amount of support, at the right time. Timing the delivery of a JITAI such as the user is receptive and available to engage with the intervention is crucial for a JITAI to succeed. Although previous research has extensively explored the role of context in users' responsiveness towards generic phone notifications, it has not been thoroughly explored for actual mHealth interventions. In this work, we explore the factors affecting users' receptivity towards JITAIs. To this end, we conducted a study with 189 participants, over a period of 6 weeks, where participants received interventions to improve their physical activity levels. The interventions were delivered by a chatbot-based digital coach - Ally - which was available on Android and iOS platforms. \par  We define several metrics to gauge receptivity towards the interventions, and found that (1) several participant-specific characteristics (age, personality, and device type) show significant associations with the overall participant receptivity over the course of the study, and that (2) several contextual factors (day/time, phone battery, phone interaction, physical activity, and location), show significant associations with the participant receptivity, in-the-moment. Further, we explore the relationship between the effectiveness of the intervention and receptivity towards those interventions; based on our analyses, we speculate that being receptive to interventions helped participants achieve physical activity goals, which in turn motivated participants to be more receptive to future interventions. 
Finally, we build machine-learning models to detect receptivity, with up to a 77\% increase in F1 score over a biased random classifier.},
}

@Article{kunzler:receptivity-supplement,
  author =        {Florian K{\"{u}}nzler and Varun Mishra and Jan-Niklas Kramer and David Kotz and Elgar Fleisch and Tobias Kowatsch},
  title =         {{Exploring the State-of-Receptivity for mHealth Interventions: Supplementary Material}},
  journal =       {Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT) (Ubicomp)},
  year =          2019,
  month =         dec,
  volume =        3,
  number =        4,
  articleno =     140,
  numpages =      27,
  publisher =     {ACM},
  copyright =     {ACM},
  DOI =           {10.1145/3369805},
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/kunzler-receptivity-supplement/index.html},
  abstract =      {Supplemental materials include: movie, appendix, image and software files},
}

@Article{marsch:dtect-protocol,
  author =        {Lisa A. Marsch and Ching-Hua Chen and Sara R. Adams and Asma Asyyed and Monique B. Does and Saeed Hassanpour and Emily Hichborn and Melanie Jackson-Morris and Nicholas C. Jacobson and Heather K. Jones and David Kotz and Chantal A. Lambert-Harris and Zhiguo Li and Bethany McLeman and Varun Mishra and Catherine Stanger and Geetha Subramaniam and Weiyi Wu and Cynthia I. Campbell},
  title =         {{The Feasibility and Utility of Harnessing Digital Health to Understand Clinical Trajectories in Medication Treatment for Opioid Use Disorder: D-TECT Study Design and Methodological Considerations}},
  journal =       {Frontiers in Psychiatry},
  year =          2022,
  month =         apr,
  day =           29,
  volume =        13,
  articleno =     871916,
  numpages =      12,
  publisher =     {Frontiers Media},
  copyright =     {the authors},
  DOI =           {10.3389/fpsyt.2022.871916},
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/marsch-dtect-protocol/index.html},
  note =          {Section: Addictive Disorders},
  abstract =      { \emph{Introduction:} Across the U.S., the prevalence of opioid use disorder (OUD) and the rates of opioid overdoses have risen precipitously in recent years. Several effective medications for OUD (MOUD) exist and have been shown to be life-saving. A large volume of research has identified a confluence of factors that predict attrition and continued substance use during substance use disorder treatment. However, much of this literature has examined a small set of potential moderators or mediators of outcomes in MOUD treatment and may lead to over-simplified accounts of treatment non-adherence. Digital health methodologies offer great promise for capturing intensive, longitudinal ecologically-valid data from individuals in MOUD treatment to extend our understanding of factors that impact treatment engagement and outcomes.  \par \emph{Methods:} This paper describes the protocol (including the study design and methodological considerations) from a novel study supported by the National Drug Abuse Treatment Clinical Trials Network at the National Institute on Drug Abuse (NIDA). This study (D-TECT) primarily seeks to evaluate the feasibility of collecting ecological momentary assessment (EMA), smartphone and smartwatch sensor data, and social media data among patients in outpatient MOUD treatment. It secondarily seeks to examine the utility of EMA, digital sensing, and social media data (separately and compared to one another) in predicting MOUD treatment retention, opioid use events, and medication adherence [as captured in electronic health records (EHR) and EMA data]. To our knowledge, this is the first project to include all three sources of digitally derived data (EMA, digital sensing, and social media) in understanding the clinical trajectories of patients in MOUD treatment. These multiple data streams will allow us to understand the relative and combined utility of collecting digital data from these diverse data sources. 
The inclusion of EHR data allows us to focus on the utility of digital health data in predicting objectively measured clinical outcomes.  \par \emph{Discussion:} Results may be useful in elucidating novel relations between digital data sources and OUD treatment outcomes. It may also inform approaches to enhancing outcomes measurement in clinical trials by allowing for the assessment of dynamic interactions between individuals' daily lives and their MOUD treatment response.  \par \emph{Clinical Trial Registration:} Identifier: NCT04535583.},
  annote =        {This article is part of the Research Topic "Novel Treatment Approaches and Future Directions in Substance Use Disorders".},
}

@InProceedings{mishra:commodity,
  author =        {Varun Mishra and Gunnar Pope and Sarah Lord and Stephanie Lewia and Byron Lowens and Kelly Caine and Sougata Sen and Ryan Halter and David Kotz},
  title =         {{The Case for a Commodity Hardware Solution for Stress Detection}},
  booktitle =     {{Proceedings of the Workshop on Mental Health: Sensing \& Intervention}},
  year =          2018,
  month =         oct,
  pages =         {1717--1728},
  publisher =     {ACM},
  copyright =     {ACM},
  DOI =           {10.1145/3267305.3267538},
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/mishra-commodity/index.html},
  abstract =      {Timely detection of an individual's stress level has the potential to expedite and improve stress management, thereby reducing the risk of adverse health consequences that may arise due to unawareness or mismanagement of stress. Recent advances in wearable sensing have resulted in multiple approaches to detect and monitor stress with varying levels of accuracy. The most accurate methods, however, rely on clinical grade sensors strapped to the user. These sensors measure physiological signals of a person and are often bulky, custom-made, expensive, and/or in limited supply, hence limiting their large-scale adoption by researchers and the general public. In this paper, we explore the viability of commercially available off-the-shelf sensors for stress monitoring. The idea is to be able to use cheap, non-clinical sensors to capture physiological signals, and make inferences about the wearer's stress level based on that data. In this paper, we describe a system involving a popular off-the-shelf heart-rate monitor, the Polar H7; we evaluated our system in a lab setting with three well-validated stress-inducing stimuli with 26 participants. Our analysis shows that using the off-the-shelf sensor alone, we were able to detect stressful events with an F1 score of 0.81, on par with clinical-grade sensors.},
}

@TechReport{mishra:ema-tr,
  author =        {Varun Mishra and Byron Lowens and Sarah Lord and Kelly Caine and David Kotz},
  title =         {{Investigating Contextual Cues as Indicators for EMA Delivery}},
  institution =   {Dartmouth Computer Science},
  year =          2018,
  month =         apr,
  number =        {TR2018-842},
  copyright =     {the authors},
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/mishra-ema-tr/index.html},
  abstract =      {In this work, we attempt to determine whether the contextual information of a participant can be used to predict whether the participant will respond to a particular Ecological Momentary Assessment (EMA) prompt. We use a publicly available dataset for our work, and find that by using basic contextual features about the participant's activity, conversation status, audio, and location, we can predict whether an EMA prompt triggered at a particular time will be answered with a precision of 0.647, which is significantly higher than a baseline precision of 0.410. Using this knowledge, the researchers conducting field studies can efficiently schedule EMA prompts and achieve higher response rates.},
}

@InProceedings{mishra:ema-workshop,
  author =        {Varun Mishra and Byron Lowens and Sarah Lord and Kelly Caine and David Kotz},
  title =         {{Investigating Contextual Cues As Indicators for EMA Delivery}},
  booktitle =     {{Proceedings of the International Workshop on Smart and Ambient Notification and Attention Management (UbiTtention)}},
  year =          2017,
  month =         sep,
  pages =         {935--940},
  publisher =     {ACM},
  copyright =     {ACM},
  location =      {Maui, Hawaii},
  DOI =           {10.1145/3123024.3124571},
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/mishra-ema-workshop/index.html},
  abstract =      {In this work, we attempt to determine whether the contextual information of a participant can be used to predict whether the participant will respond to a particular EMA trigger. We use a publicly available dataset for our work, and find that by using basic contextual features about the participant's activity, conversation status, audio, and location, we can predict if an EMA triggered at a particular time will be answered with a precision of 0.647, which is significantly higher than a baseline precision of 0.41. Using this knowledge, the researchers conducting field studies can efficiently schedule EMAs and achieve higher response rates.},
}

@Article{mishra:jcommodity,
  author =        {Varun Mishra and Gunnar Pope and Sarah Lord and Stephanie Lewia and Byron Lowens and Kelly Caine and Sougata Sen and Ryan Halter and David Kotz},
  title =         {{Continuous Detection of Physiological Stress with Commodity Hardware}},
  journal =       {ACM Transactions on Computing for Healthcare (HEALTH)},
  year =          2020,
  month =         apr,
  volume =        1,
  number =        2,
  articleno =     8,
  numpages =      30,
  publisher =     {ACM},
  copyright =     {the authors},
  DOI =           {10.1145/3361562},
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/mishra-jcommodity/index.html},
  abstract =      {Timely detection of an individual's stress level has the potential to improve stress management, thereby reducing the risk of adverse health consequences that may arise due to mismanagement of stress. Recent advances in wearable sensing have resulted in multiple approaches to detect and monitor stress with varying levels of accuracy. The most accurate methods, however, rely on clinical-grade sensors to measure physiological signals; they are often bulky, custom made, and expensive, hence limiting their adoption by researchers and the general public. In this article, we explore the viability of commercially available off-the-shelf sensors for stress monitoring. The idea is to be able to use cheap, nonclinical sensors to capture physiological signals and make inferences about the wearer's stress level based on that data. We describe a system involving a popular off-the-shelf heart rate monitor, the Polar H7; we evaluated our system with 26 participants in both a controlled lab setting with three well-validated stress-inducing stimuli and in free-living field conditions. Our analysis shows that using the off-the-shelf sensor alone, we were able to detect stressful events with an F1-score of up to 0.87 in the lab and 0.66 in the field, on par with clinical-grade sensors.},
}

@Article{mishra:receptivity,
  author =        {Varun Mishra and Florian K{\"{u}}nzler and Jan-Niklas Kramer and Elgar Fleisch and Tobias Kowatsch and David Kotz},
  title =         {{Detecting Receptivity for mHealth Interventions in the Natural Environment}},
  journal =       {Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT/UbiComp)},
  year =          2021,
  month =         {June},
  volume =        5,
  number =        2,
  articleno =     74,
  numpages =      24,
  publisher =     {ACM},
  copyright =     {ACM},
  DOI =           {10.1145/3463492},
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/mishra-receptivity/index.html},
  abstract =      { Just-In-Time Adaptive Intervention (JITAI) is an emerging technique with great potential to support health behavior by providing the right type and amount of support at the right time. A crucial aspect of JITAIs is properly timing the delivery of interventions, to ensure that a user is receptive and ready to process and use the support provided. Some prior works have explored the association of context and some user-specific traits on receptivity, and have built post-study machine-learning models to detect receptivity. For effective intervention delivery, however, a JITAI system needs to make in-the-moment decisions about a user's receptivity. To this end, we conducted a study in which we deployed machine-learning models to detect receptivity in the natural environment, i.e., in free-living conditions.  \par  We leveraged prior work regarding receptivity to JITAIs and deployed a chatbot-based digital coach - Ally - that provided physical-activity interventions and motivated participants to achieve their step goals. We extended the original Ally app to include two types of machine-learning model that used contextual information about a person to predict when a person is receptive: a \emph{static model} that was built before the study started and remained constant for all participants and an \emph{adaptive model} that continuously learned the receptivity of individual participants and updated itself as the study progressed. For comparison, we included a \emph{control model} that sent intervention messages at random times. The app randomly selected a delivery model for each intervention message. We observed that the machine-learning models led up to a 40\% improvement in receptivity as compared to the control model. Further, we evaluated the temporal dynamics of the different models and observed that receptivity to messages from the adaptive model increased over the course of the study.},
}

@Article{mishra:receptivity-highlight,
  author =        {Varun Mishra and Florian K{\"{u}}nzler and Jan-Niklas Kramer and Elgar Fleisch and Tobias Kowatsch and David Kotz},
  title =         {{Detecting Receptivity for mHealth Interventions}},
  journal =       {GetMobile: Mobile Computing and Communications},
  year =          2023,
  month =         {June},
  volume =        27,
  number =        2,
  pages =         {23--28},
  publisher =     {ACM},
  copyright =     {ACM},
  DOI =           {10.1145/3614214.3614221},
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/mishra-receptivity-highlight/index.html},
  abstract =      { Just-In-Time Adaptive Interventions (JITAI) have the potential to provide effective support for health behavior by delivering the right type and amount of intervention at the right time. The timing of interventions is crucial to ensure that users are receptive and able to use the support provided. Previous research has explored the association of context and user-specific traits on receptivity and built machine-learning models to detect receptivity after the study was completed. However, for effective intervention delivery, JITAI systems need to make in-the-moment decisions about a user's receptivity. In this study, we deployed machine-learning models in a chatbot-based digital coach to predict receptivity for physical-activity interventions. We included a static model that was built before the study and an adaptive model that continuously updated itself during the study. Compared to a control model that sent intervention messages randomly, the machine-learning models improved receptivity by up to 36\%. Receptivity to messages from the adaptive model increased over time.},
}

@TechReport{mishra:receptivity-tr,
  author =        {Varun Mishra and Florian K{\"{u}}nzler and Jan-Niklas Kramer and Elgar Fleisch and Tobias Kowatsch and David Kotz},
  title =         {{Detecting Receptivity for mHealth Interventions in the Natural Environment}},
  institution =   {arXiv},
  year =          2020,
  month =         {November},
  day =           16,
  number =        {arXiv:2011.08302},
  copyright =     {the authors},
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/mishra-receptivity-tr/index.html},
  note =          {v1},
  abstract =      {JITAI is an emerging technique with great potential to support health behavior by providing the right type and amount of support at the right time. A crucial aspect of JITAIs is properly timing the delivery of interventions, to ensure that a user is receptive and ready to process and use the support provided. Some prior works have explored the association of context and some user-specific traits on receptivity, and have built post-study machine-learning models to detect receptivity. For effective intervention delivery, however, a JITAI system needs to make in-the-moment decisions about a user's receptivity. To this end, we conducted a study in which we deployed machine-learning models to detect receptivity in the natural environment, i.e., in free-living conditions.  We leveraged prior work regarding receptivity to JITAIs and deployed a chatbot-based digital coach -- Walkie -- that provided physical-activity interventions and motivated participants to achieve their step goals. The Walkie app included two types of machine-learning model that used contextual information about a person to predict when a person is receptive: a static model that was built before the study started and remained constant for all participants and an adaptive model that continuously learned the receptivity of individual participants and updated itself as the study progressed. For comparison, we included a control model that sent intervention messages at random times. The app randomly selected a delivery model for each intervention message. We observed that the machine-learning models led up to a 40\% improvement in receptivity as compared to the control model. Further, we evaluated the temporal dynamics of the different models and observed that receptivity to messages from the adaptive model increased over the course of the study.  },
}

@Article{mishra:stress-ml,
  author =        {Varun Mishra and Sougata Sen and Grace Chen and Tian Hao and Jeffrey Rogers and Ching-Hua Chen and David Kotz},
  title =         {{Evaluating the Reproducibility of Physiological Stress Detection Models}},
  journal =       {Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT/UbiComp)},
  year =          2020,
  month =         {December},
  volume =        4,
  number =        4,
  articleno =     147,
  numpages =      29,
  publisher =     {ACM},
  copyright =     {ACM},
  DOI =           {10.1145/3432220},
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/mishra-stress-ml/index.html},
  abstract =      {Recent advances in wearable sensor technologies have led to a variety of approaches for detecting physiological stress. Even with over a decade of research in the domain, there still exist many significant challenges, including a near-total lack of reproducibility across studies. Researchers often use some physiological sensors (custom-made or off-the-shelf), conduct a study to collect data, and build machine-learning models to detect stress. There is little effort to test the applicability of the model with similar physiological data collected from different devices, or the efficacy of the model on data collected from different studies, populations, or demographics. \par  This paper takes the first step towards testing reproducibility and validity of methods and machine-learning models for stress detection. To this end, we analyzed data from 90 participants, from four independent controlled studies, using two different types of sensors, with different study protocols and research goals. We started by evaluating the performance of models built using data from one study and tested on data from other studies. Next, we evaluated new methods to improve the performance of stress-detection models and found that our methods led to a consistent increase in performance across all studies, irrespective of the device type, sensor type, or the type of stressor. Finally, we developed and evaluated a clustering approach to determine the stressed/not-stressed classification when applying models on data from different studies, and found that our approach performed better than selecting a threshold based on training data. This paper's thorough exploration of reproducibility in a controlled environment provides a critical foundation for deeper study of such methods, and is a prerequisite for tackling reproducibility in free-living conditions.},
}

@PhdThesis{mishra:thesis,
  author =        {Varun Mishra},
  title =         {{Towards Effective Delivery of Digital Interventions for Mental and Behavioral Health}},
  school =        {Dartmouth Computer Science},
  year =          2021,
  month =         {September},
  copyright =     {the author},
  address =       {Hanover, NH},
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/mishra-thesis/index.html},
  abstract =      {The pervasiveness of sensor-rich mobile, wearable, and IoT devices has enabled researchers to passively sense various user traits and characteristics, which in turn have the potential to detect and predict different mental and behavioral health outcomes. Upon detecting or anticipating a negative outcome, the same devices can be used to deliver in-the-moment interventions and support to help users. One important factor that determines the effectiveness of digital health interventions is delivering them at the right time: (1) when a person needs support, i.e., at or before the onset of a negative outcome, or a psychological or contextual state that might lead to that outcome (state-of-vulnerability); and (2) when a person is able and willing to receive, process, and use the support provided (state-of-receptivity). In this dissertation, we present our work on determining when to deliver interventions by exploring and detecting both vulnerability and receptivity.\par  In the first part of the thesis, we discuss our work on accurate sensing and detection of different states-of-vulnerability. We start by discussing our work on advancing the field of physiological stress sensing. We took the first step towards testing the reproducibility and validity of our methods and machine-learning models for stress detection. To this end, we analyzed data from 90 participants from four independent controlled studies, using two different types of sensors, with different study protocols and research goals. We evaluated new methods to improve the performance of stress-detection models and found that our methods led to a consistent increase in performance across all studies, irrespective of the device type, sensor type, or the type of stressor. Our thorough exploration of reproducibility in a controlled environment provides a critical foundation for deeper study of such methods, and is a prerequisite for tackling reproducibility in free-living conditions.  
\par  Next, we present our work on detecting at-risk indicators for patients undergoing Opioid Use Disorder (OUD) treatment. We conducted a 12-week study with 59 patients undergoing an OUD treatment and collected sensor data, like location, physical activity, sleep, and heart rate, from smartphones and wearables. We used the data collected to formulate low-level contextual features and high-level behavioral features and explored the feasibility of detecting self-reported stress, craving, and mood of the participants. Our results show that adaptive, personalized models can detect different at-risk behaviors with the area under the receiver operating characteristic (AUROC) values of up to 0.85.  \par  In the second part of this dissertation, we discuss our contributions in the domain of state-of-receptivity for digital health interventions. We start by conducting a study with 189 participants in Switzerland to explore participant receptivity towards actual physical activity behavior change interventions and report novel and significant results, e.g., being more receptive to interventions leads to higher goal completion likelihood. We further built machine-learning models to predict state-of-receptivity and deployed those models in a real-world study with participants in the United States to evaluate their effectiveness. Our results show that participants were more receptive to interventions delivered at moments detected as `receptive' by our models.  \par  In addition to receptivity in daily living conditions, we explored how participants interact with affective health interventions while driving. We analyzed longitudinal data from 10 participants driving in their day-to-day lives for two months. 
In this exploratory work, we found that several high-level trip factors (traffic flow, trip length, and vehicle occupancy) and in-the-moment factors (road type, average speed, and braking behavior) showed significant associations with the participant's decision to start or cancel an intervention. Based on our analysis, we provide solid recommendations on delivering interventions to maximize responsiveness and effectiveness and minimize the burden on the drivers.  \par  Overall, this dissertation makes significant contributions to the respective sub-fields by addressing fundamental challenges, advancing the current state-of-the-art, and contributing new knowledge, thereby laying a solid foundation for designing, implementing, and delivering future digital health interventions.},
}

@InProceedings{mishra:wellcomp,
  author =        {Varun Mishra and Sarah Hong and David Kotz},
  title =         {{Exploring the Relationship Between Intrinsic Motivation and Receptivity to mHealth Interventions}},
  booktitle =     {{Proceedings of UbiComp Workshop on Computing for Well-being (WellComp)}},
  year =          2024,
  month =         {October},
  pages =         {437--443},
  publisher =     {ACM},
  copyright =     {the authors},
  DOI =           {10.1145/3675094.3678498},
  URL =           {https://www.cs.dartmouth.edu/~kotz/research/mishra-wellcomp/index.html},
  abstract =      {Just-in-Time Adaptive Interventions aim to deliver the right type and amount of support at the right time. This involves determining a user's state of receptivity - the degree to which a user is willing to accept, process, and use the intervention. Although past work has found that users are more receptive to notifications they view as useful, there is no existing research on whether users' intrinsic motivation for the underlying topic of mHealth interventions affects their receptivity. To explore this, we conducted a study with 20 participants over three weeks, where participants interacted with a chatbot-based digital coach to receive interventions about mental health, COVID-19, physical activity, and diet \& nutrition. We found that significant differences in mean intrinsic motivation scores across topics were not associated with differences in mean receptivity metrics across topics. However, we discovered positive relationships between intrinsic motivation measures and receptivity for interventions about a topic.},
}

