@article{1196,
  author   = {Sadeddine, Khadidja and Chelali, Fatma Zohra and Djeradi, Rachida and Djeradi, Amar},
  title    = {Visual Speaker Verification System Depending on {Arabic} Syllables},
  journal  = {International Journal of Computational Linguistics Research},
  year     = {2013},
  volume   = {4},
  number   = {2},
  url      = {http://www.dline.info/jcl/fulltext/v4n2/2.pdf},
  abstract = {We develop in this work a speaker verification system depending on Arabic syllables by the study of the visual speech that contains the visual and acoustic modalities. The visual signal provides both additional information that is not present in the audio and also a visual representation of some of the information that is present in the audio. This is particularly evident in the perception of speech where the articulatory gestures of the speaker's lips and face can significantly improve the listener's detection. In order to analyze Arabic visual speech, we extract features such as pitch and LPC coefficients for the acoustic modality, and we use DCT coefficients for lip images for visual modality. Hierarchical ascendant classification (HAC) is applied for each modality. Simulation results show good recognition rate of speaker verification depending on phoneme for the two modalities.},
}