Update JOSS paper based on reviewer two #11

Merged · 10 commits · Apr 16, 2024
157 changes: 144 additions & 13 deletions paper/paper.bib
@@ -8,20 +8,14 @@ @article{soukupovaRealTimeEyeBlink2016
url = {https://api.semanticscholar.org/CorpusID:35923299},
}

@misc{lugaresiMediaPipeFrameworkBuilding2019,
title = {{{MediaPipe}}: {{A Framework}} for {{Building Perception Pipelines}}},
shorttitle = {{{MediaPipe}}},
author = {Lugaresi, Camillo and Tang, Jiuqiang and Nash, Hadon and McClanahan, Chris and Uboweja, Esha and Hays, Michael and Zhang, Fan and Chang, Chuo-Ling and Yong, Ming Guang and Lee, Juhyun and Chang, Wan-Teh and Hua, Wei and Georg, Manfred and Grundmann, Matthias},
year = {2019},
month = jun,
number = {arXiv:1906.08172},
eprint = {1906.08172},
primaryclass = {cs},
publisher = {{arXiv}},
doi = {10.48550/arXiv.1906.08172},
archiveprefix = {arxiv},
@inproceedings{lugaresiMediaPipeFrameworkBuilding2019,
title = {{{MediaPipe}}: {{A}} Framework for Perceiving and Processing Reality},
booktitle = {Third Workshop on Computer Vision for {{AR}}/{{VR}} at {{IEEE}} Computer Vision and Pattern Recognition ({{CVPR}}) 2019},
author = {Lugaresi, Camillo and Tang, Jiuqiang and Nash, Hadon and McClanahan, Chris and Uboweja, Esha and Hays, Michael and Zhang, Fan and Chang, Chuo-Ling and Yong, Ming and Lee, Juhyun and Chang, Wan-Teh and Hua, Wei and Georg, Manfred and Grundmann, Matthias},
year = {2019}
}


@article{kartynnikRealtimeFacialSurface2019a,
title = {Real-Time {{Facial Surface Geometry}} from {{Monocular Video}} on {{Mobile GPUs}}},
author = {Kartynnik, Yury and Ablavatski, Artsiom and Grishchenko, Ivan and Grundmann, Matthias},
@@ -103,4 +97,141 @@ @ARTICLE{otsu
number={1},
pages={62-66},
doi={10.1109/TSMC.1979.4310076}
}
}

@article{kwonHighspeedCameraCharacterization2013,
title = {High-Speed Camera Characterization of Voluntary Eye Blinking Kinematics},
author = {Kwon, Kyung-Ah and Shipley, Rebecca J. and Edirisinghe, Mohan and Ezra, Daniel G. and Rose, Geoff and Best, Serena M. and Cameron, Ruth E.},
year = {2013},
month = aug,
journal = {Journal of the Royal Society, Interface},
volume = {10},
number = {85},
pages = {20130227},
issn = {1742-5662},
doi = {10.1098/rsif.2013.0227},
langid = {english},
pmcid = {PMC4043155},
pmid = {23760297},
}

@article{vanderwerfBlinkRecoveryPatients2007,
title = {Blink {{Recovery}} in {{Patients}} with {{Bell}}'s {{Palsy}}: {{A Neurophysiological}} and {{Behavioral Longitudinal Study}}},
shorttitle = {Blink {{Recovery}} in {{Patients}} with {{Bell}}'s {{Palsy}}},
author = {VanderWerf, Frans and Reits, Dik and Smit, Albertine Ellen and Metselaar, Mick},
year = {2007},
month = jan,
journal = {Investigative Ophthalmology \& Visual Science},
volume = {48},
number = {1},
pages = {203--213},
issn = {1552-5783},
doi = {10.1167/iovs.06-0499},
urldate = {2024-04-16},
}

@article{nuuttilaDiagnosticAccuracyGlabellar2021,
title = {Diagnostic Accuracy of Glabellar Tap Sign for {{Parkinson}}'s Disease},
author = {Nuuttila, Simo and Eklund, Mikael and Joutsa, Juho and Jaakkola, Elina and M{\"a}kinen, Elina and Honkanen, Emma A. and Lindholm, Kari and Noponen, Tommi and Ihalainen, Toni and Murtom{\"a}ki, Kirsi and Nojonen, Tanja and Levo, Reeta and Mertsalmi, Tuomas and Scheperjans, Filip and Kaasinen, Valtteri},
year = {2021},
journal = {Journal of Neural Transmission},
volume = {128},
number = {11},
pages = {1655--1661},
issn = {0300-9564},
doi = {10.1007/s00702-021-02391-3},
urldate = {2024-04-16},
}

@article{vanderwerfEyelidMovementsBehavioral2003,
title = {Eyelid Movements: Behavioral Studies of Blinking in Humans under Different Stimulus Conditions},
shorttitle = {Eyelid Movements},
author = {VanderWerf, Frans and Brassinga, Peter and Reits, Dik and Aramideh, Majid and {Ongerboer de Visser}, Bram},
year = {2003},
month = may,
journal = {Journal of Neurophysiology},
volume = {89},
number = {5},
pages = {2784--2796},
issn = {0022-3077},
langid = {english},
}

@article{cruzSpontaneousEyeblinkActivity2011,
title = {Spontaneous Eyeblink Activity},
author = {Cruz, Antonio A. V. and Garcia, Denny M. and Pinto, Carolina T. and Cechetti, Sheila P.},
year = {2011},
month = jan,
journal = {The Ocular Surface},
volume = {9},
number = {1},
pages = {29--41},
issn = {1542-0124},
langid = {english},
pmid = {21338567},
}

@article{volkInitialSeverityMotor2017,
title = {Initial Severity of Motor and Non-Motor Disabilities in Patients with Facial Palsy: An Assessment Using Patient-Reported Outcome Measures},
shorttitle = {Initial Severity of Motor and Non-Motor Disabilities in Patients with Facial Palsy},
author = {Volk, Gerd Fabian and Granitzka, Thordis and Kreysa, Helene and Klingner, Carsten M. and {Guntinas-Lichius}, Orlando},
year = {2017},
month = jan,
journal = {European archives of oto-rhino-laryngology: official journal of the European Federation of Oto-Rhino-Laryngological Societies (EUFOS): affiliated with the German Society for Oto-Rhino-Laryngology - Head and Neck Surgery},
volume = {274},
number = {1},
pages = {45--52},
issn = {1434-4726},
doi = {10.1007/s00405-016-4018-1},
abstract = {Patients with facial palsy (FP) not only suffer from their facial movement disorder, but also from social and psychological disabilities. These can be assessed by patient-reported outcome measures (PROMs) like the quality-of-life Short-Form 36 Item Questionnaire (SF36) or FP-specific instruments like the Facial Clinimetric Evaluation Scale (FaCE) or the Facial Disability Index (FDI). Not much is known about factors influencing PROMs in patients with FP. We identified predictors for baseline SF36, FaCE, and FDI scoring in 256 patients with unilateral peripheral FP using univariate correlation and multivariate linear regression analyses. Mean age was 52~{\textpm}~18~years. 153 patients (60~\%) were female. 90 patients (31~\%) and 176 patients (69~\%) were first seen {$<$}90 or {$>$}90~days after onset, respectively, i.e., with acute or chronic FP. House-Brackmann grading was 3.9~{\textpm}~1.4. FaCE subscores varied from 41~{\textpm}~28 to 71~{\textpm}~26, FDI scores from 65~{\textpm}~20 to 70~{\textpm}~22, and SF36 domains from 52~{\textpm}~20 to 80~{\textpm}~24. Older age, female gender, higher House-Brackmann grading, and initial assessment {$>$}90~days after onset were independent predictors for lower FaCE subscores and partly for lower FDI subscores (all p~{$<~$}0.05). Older age and female gender were best predictors for lower results in SF36 domains. Comorbidity was associated with lower SF General health perception and lower SF36 Emotional role (all p~{$<~$}0.05). Specific PROMs reveal that older and female patients and patients with chronic FP suffer particularly from motor and non-motor disabilities related to FP. Comorbidity unrelated to the FP could additionally impact the quality of life of patients with FP.},
langid = {english},
pmid = {27040558},
keywords = {Bell's palsy,Disability Evaluation,Disabled Persons,Facial nerve,Facial nerve reconstruction,Facial Paralysis,Humans,Patient Reported Outcome Measures,Patient-oriented methods,Quality of life,Quality of Life,Surveys and Questionnaires}
}

@article{louReviewAutomatedFacial2020,
title = {A {{Review}} on {{Automated Facial Nerve Function Assessment From Visual Face Capture}}},
author = {Lou, Jianwen and Yu, Hui and Wang, Fei-Yue},
year = {2020},
month = feb,
journal = {IEEE Transactions on Neural Systems and Rehabilitation Engineering},
volume = {28},
number = {2},
pages = {488--497},
issn = {1558-0210},
doi = {10.1109/TNSRE.2019.2961244},
}

@article{hochreiterMachineLearningBasedDetectingEyelid2023,
title = {Machine-{{Learning-Based Detecting}} of {{Eyelid Closure}} and {{Smiling Using Surface Electromyography}} of {{Auricular Muscles}} in {{Patients}} with {{Postparalytic Facial Synkinesis}}: {{A Feasibility Study}}},
shorttitle = {Machine-{{Learning-Based Detecting}} of {{Eyelid Closure}} and {{Smiling Using Surface Electromyography}} of {{Auricular Muscles}} in {{Patients}} with {{Postparalytic Facial Synkinesis}}},
author = {Hochreiter, Jakob and Hoche, Eric and Janik, Luisa and Volk, Gerd Fabian and Leistritz, Lutz and Anders, Christoph and {Guntinas-Lichius}, Orlando},
year = {2023},
month = jan,
journal = {Diagnostics},
volume = {13},
number = {3},
pages = {554},
publisher = {Multidisciplinary Digital Publishing Institute},
issn = {2075-4418},
doi = {10.3390/diagnostics13030554},
urldate = {2023-03-15},
langid = {english},
}

@article{chenSmartphoneBasedArtificialIntelligenceAssisted2021,
title = {Smartphone-{{Based Artificial Intelligence-Assisted Prediction}} for {{Eyelid Measurements}}: {{Algorithm Development}} and {{Observational Validation Study}}},
shorttitle = {Smartphone-{{Based Artificial Intelligence-Assisted Prediction}} for {{Eyelid Measurements}}},
author = {Chen, Hung-Chang and Tzeng, Shin-Shi and Hsiao, Yen-Chang and Chen, Ruei-Feng and Hung, Erh-Chien and Lee, Oscar K.},
year = {2021},
month = oct,
journal = {JMIR mHealth and uHealth},
volume = {9},
number = {10},
pages = {e32444},
issn = {2291-5222},
doi = {10.2196/32444},
langid = {english},
pmcid = {PMC8538024},
pmid = {34538776},
}
37 changes: 25 additions & 12 deletions paper/paper.md
@@ -34,14 +34,17 @@ bibliography: paper.bib

Analyzing facial features and expressions is a complex task in computer vision.
The human face is intricate, with significant shape, texture, and appearance variations.
In medical contexts, facial structures that differ from the norm, such as those affected by paralysis, are particularly important to study and require precise analysis.
One area of interest is the subtle movements involved in blinking, a process that is not yet fully understood and needs high-resolution, time-specific analysis for detailed understanding.
However, a significant challenge is that many advanced computer vision techniques demand programming skills, making them less accessible to medical professionals who may not have these skills.
In medical contexts, facial structures and movements that differ from the norm are particularly important to study and require precise analysis to understand the underlying conditions.
Given that the facial muscles, innervated by the facial nerve, are solely responsible for facial expressions, facial palsy can lead to severe impairments in facial movements [@volkInitialSeverityMotor2017;@louReviewAutomatedFacial2020].

One affected area of interest is the subtle movements involved in blinking [@vanderwerfBlinkRecoveryPatients2007;@nuuttilaDiagnosticAccuracyGlabellar2021;@vanderwerfEyelidMovementsBehavioral2003].
It is an intricate, spontaneous process that is not yet fully understood and requires high-resolution, time-resolved analysis to be characterized in detail [@kwonHighspeedCameraCharacterization2013;@cruzSpontaneousEyeblinkActivity2011].
However, a significant challenge is that many computer vision techniques demand programming skills for automated extraction and analysis, making them less accessible to medical professionals without a programming background.
The Jena Facial Palsy Toolbox (JeFaPaTo) has been developed to bridge this gap.
It utilizes cutting-edge computer vision algorithms and offers a user-friendly interface for those without programming expertise.
This toolbox is designed to make advanced facial analysis more accessible to medical experts, simplifying integration into their workflow.
This toolbox makes advanced facial analysis more accessible to medical experts, simplifying integration into their workflow.

The state of the eye closure is of high interest to medical experts, e.g., in the context of facial palsy or Parkinson's disease.
This simple-to-use tool could enable medical professionals to quickly assess the blinking behavior of patients, providing valuable insights into their condition, especially in the context of facial palsy or Parkinson's disease [@nuuttilaDiagnosticAccuracyGlabellar2021;@vanderwerfBlinkRecoveryPatients2007].
Due to facial nerve damage, the eye-closing process might be impaired and can entail undesirable side effects.
Hence, more than a simple distinction between open and closed eyes is required for a detailed analysis.
Factors such as duration, synchronicity, velocity, complete closure, the time between blinks, and frequency over time are highly relevant.
@@ -50,21 +50,31 @@ Such detailed analysis could help medical experts better understand the blinking
# Statement of need

To analyze the blinking behavior in detail, medical experts often use high-speed cameras to record the blinking process.
Existing tools modeling the eye state based on the Eye-Aspect-Ratio (EAR), such as [@soukupovaRealTimeEyeBlink2016], only classify the eye state as open or closed, requiring a labeled dataset for training a suitable classifier.
Existing tools modeling the eye state based on the Eye-Aspect-Ratio (EAR), such as @soukupovaRealTimeEyeBlink2016, only classify the eye state as open or closed, requiring a labeled dataset for training a suitable classifier.
This approach neglects relevant information such as the blink intensity, duration, or partial blinks, which are crucial for a detailed analysis in a medical context.
Moreover, this simple classification approach does not factor in high temporal resolution video data, which is essential for a thorough analysis of the blinking process as most blinks are shorter than 100 ms.
We developed `JeFaPaTo` to go beyond the simple eye state classification and offer a method to extract complete blinking intervals for detailed analysis.
We aim to provide a custom tool that is easy for medical experts to use, abstracting the complexity of the underlying computer vision algorithms and high-temporal-resolution processing, and enabling them to analyze blinking behavior without requiring programming skills.
An existing approach by @kwonHighspeedCameraCharacterization2013 for high-temporal-resolution videos uses only one frame every 5 ms and requires manually measuring the upper and lower eyelid margins.
Other methods require additional sensors such as electromyography (EMG) or magnetic search coils to measure the eyelid movement [@vanderwerfBlinkRecoveryPatients2007;@vanderwerfEyelidMovementsBehavioral2003].
Such sensors necessitate additional human resources and are unsuitable for routine clinical analysis.
`JeFaPaTo` is a novel approach that combines the advantages of high temporal resolution video data [@kwonHighspeedCameraCharacterization2013] and computer vision algorithms [@soukupovaRealTimeEyeBlink2016]
to analyze the blinking behavior.
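
For orientation, the EAR of @soukupovaRealTimeEyeBlink2016 is a single scalar per eye and frame, computed from six eye landmarks $p_1,\dots,p_6$ as $\mathrm{EAR} = (\lVert p_2 - p_6 \rVert + \lVert p_3 - p_5 \rVert) \, / \, (2 \lVert p_1 - p_4 \rVert)$. A minimal Python sketch of this formula (not the exact `JeFaPaTo` implementation) could look as follows:

```python
import numpy as np

def eye_aspect_ratio(pts: np.ndarray) -> float:
    """Eye-Aspect-Ratio (EAR) after Soukupova & Cech (2016).

    `pts` is a (6, 2) array of one eye's landmarks p1..p6, where p1/p4
    are the horizontal eye corners and (p2, p6), (p3, p5) the two
    vertical landmark pairs. The EAR stays roughly constant for an open
    eye and drops towards zero as the eyelids close.
    """
    vertical = np.linalg.norm(pts[1] - pts[5]) + np.linalg.norm(pts[2] - pts[4])
    horizontal = np.linalg.norm(pts[0] - pts[3])
    return float(vertical / (2.0 * horizontal))
```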

## Overview of JeFaPaTo

`JeFaPaTo` is a Python-based [@python] program to support medical and psychological experts in analyzing blinking and facial features for high temporal resolution video data.
The tool is split into two main parts: An extendable programming interface and a graphical user interface (GUI) entirely written in Python.
The programming interface enables efficient processing of temporal resolution video data, automatically extracts selected facial features, and provides a set of analysis functions specialized for blinking analysis.
The GUI offers non-programmers an intuitive way to use the analysis functions, visualize the results, and export the data for further analysis.
`JeFaPaTo` is designed to be extendable by additional analysis functions and facial features and is under joint development by computer vision and medical experts to ensure high usability and relevance for the target group.
We follow a two-pronged approach so that both programmers and non-programmers can use the tool.
On the one hand, we provide a programming interface for efficiently processing high-temporal resolution video data, automatic facial feature extraction, and specialized blinking analysis functions.
This interface is extendable, allowing the easy addition of new or existing facial feature-based processing functions (e.g., mouth movement analysis [@hochreiterMachineLearningBasedDetectingEyelid2023] or MRD1/MRD2 [@chenSmartphoneBasedArtificialIntelligenceAssisted2021]).
On the other hand, we offer a graphical user interface (GUI), entirely written in Python, to enable non-programmers to use the full set of analysis functions, visualize the results, and export the data for further analysis.
All functionalities of the programming interface are accessible through the GUI with additional input validations, making it easy for medical experts to use.
`JeFaPaTo` is designed to be extendable and transparent and is under joint development by computer vision and medical experts to ensure high usability and relevance for the target group.
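
To illustrate this extensibility, a new per-frame feature could plug into the programming interface along the following lines; the class and method names here are a hypothetical sketch, not the actual `JeFaPaTo` API:

```python
from typing import Protocol

import numpy as np

class FacialFeature(Protocol):
    """Hypothetical contract for per-frame facial feature extractors."""

    name: str

    def compute(self, landmarks: np.ndarray) -> float:
        """Map one frame's (N, 2) landmark array to a scalar feature."""
        ...

class MouthOpening:
    """Illustrative feature: vertical gap between two inner-lip landmarks."""

    name = "mouth_opening"

    def __init__(self, upper_idx: int = 13, lower_idx: int = 14) -> None:
        # 13/14 are the MediaPipe FaceMesh inner-lip midpoints (an
        # assumption; verify against the mesh topology before use).
        self.upper_idx = upper_idx
        self.lower_idx = lower_idx

    def compute(self, landmarks: np.ndarray) -> float:
        return float(np.linalg.norm(landmarks[self.upper_idx] - landmarks[self.lower_idx]))
```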

`JeFaPaTo` leverages the `mediapipe` library [@lugaresiMediaPipeFrameworkBuilding2019;@kartynnikRealtimeFacialSurface2019a] to extract facial landmarks and blend shape features from video data at 60 FPS (on modern hardware).
With the landmarks, we compute the `EAR` (Eye-Aspect-Ratio) [@soukupovaRealTimeEyeBlink2016] for both eyes over the videos.
Additionally, `JeFaPaTo` detects blinks, matches left and right eye, and computes medically relevant statistics, a visual summary for the provided video, shown in \autoref{fig:summary}, and exports the data in various formats for further independent analysis.
Additionally, `JeFaPaTo` detects blinks, matches the left and right eye, and computes medically relevant statistics.
Furthermore, a visual summary for the video is provided in the GUI, shown in \autoref{fig:summary}, and the data can be exported in various formats for further independent analysis.
The visual summary lets medical experts quickly get an overview of the blinking behavior.
As shown in \autoref{fig:summary}, the blinks per minute appear as a histogram over time on the upper axis, and the delay between blinks on the right axis.
The main plot comprises the scatter plot of the `EAR` score for the left and right eye, and the dots indicate the detected blinks, with the rolling mean and standard deviation shown as a line plot.
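
To make the extraction pipeline concrete, the following sketch derives a per-frame EAR trace with `mediapipe`'s FaceMesh and marks blink candidates as prominent dips. The six-point index sets are a commonly used choice for MediaPipe's 468-landmark mesh, and the file name and peak-detection thresholds are illustrative assumptions, not the tuned values used by `JeFaPaTo`:

```python
import cv2
import mediapipe as mp
import numpy as np
from scipy.signal import find_peaks

# Commonly used six-point EAR subsets for the MediaPipe FaceMesh
# (assumption: JeFaPaTo's exact index choice may differ).
LEFT_EYE = [362, 385, 387, 263, 373, 380]
RIGHT_EYE = [33, 160, 158, 133, 153, 144]

def ear_trace(video_path: str, indices: list[int]) -> np.ndarray:
    """Per-frame EAR for one eye; NaN where no face is detected."""
    cap = cv2.VideoCapture(video_path)
    scores = []
    with mp.solutions.face_mesh.FaceMesh(max_num_faces=1, refine_landmarks=True) as mesh:
        while True:
            ok, frame = cap.read()
            if not ok:
                break
            result = mesh.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            if not result.multi_face_landmarks:
                scores.append(np.nan)
                continue
            lm = result.multi_face_landmarks[0].landmark
            # Normalized coordinates suffice for the ratio; scale by the
            # frame size if exact pixel geometry matters.
            pts = np.array([(lm[i].x, lm[i].y) for i in indices])
            scores.append(eye_aspect_ratio(pts))  # sketch from above
    cap.release()
    return np.asarray(scores)

# Blinks are dips in the EAR signal, so peaks are searched in its negation.
ear = ear_trace("blink_recording_240fps.mp4", LEFT_EYE)
peaks, props = find_peaks(-ear, prominence=0.1, distance=24, width=3)
```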
@@ -127,7 +140,7 @@ Upon data extraction, corrections to the blinking state can be made directly wit
The D-Area displays the current video frame, given that the user supplies the original video.
While this feature is optional, it helps manually correct the blinking state when required.

## Extracted Medical Relevant Statistics
## Extracted Medically Relevant Statistics

We provide a set of statistics for the medical analysis of blinking behavior that are valuable to healthcare experts.
The `JeFaPaTo` software is being developed in partnership with medical professionals to ensure that the included statistics are relevant.
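
As a sketch of how such statistics follow from detected blinks, the snippet below turns peak positions and widths (as returned by the detection sketch above) into a few summary values; the 240 FPS rate and the selection of statistics are assumptions for illustration:

```python
import numpy as np

FPS = 240  # assumed high-speed recording rate

def blink_statistics(peak_frames: np.ndarray, widths: np.ndarray, n_frames: int) -> dict:
    """Illustrative subset of blink statistics from detected peaks."""
    durations_ms = widths / FPS * 1000.0   # blink durations
    delays_s = np.diff(peak_frames) / FPS  # time between consecutive blinks
    minutes = n_frames / FPS / 60.0        # recording length
    return {
        "blink_count": int(peak_frames.size),
        "blinks_per_minute": peak_frames.size / minutes,
        "mean_duration_ms": float(durations_ms.mean()),
        "mean_delay_s": float(delays_s.mean()),
    }

stats = blink_statistics(np.asarray(peaks), props["widths"], len(ear))
```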