@article{2873,
  author   = {Gjoreski, Martin and Gjoreski, Hristijan and Kulakov, Andrea},
  title    = {Automatic Recognition of Emotions from Speech},
  journal  = {International Journal of Computational Linguistics Research},
  year     = {2019},
  volume   = {10},
  number   = {4},
  pages    = {101--107},
  doi      = {10.6025/jcl/2019/10/4/101-107},
  url      = {http://www.dline.info/jcl/fulltext/v10n4/jclv10n4_1.pdf},
  abstract = {This paper presents an approach to recognition of human emotions from speech. Seven emotions are recognized: anger, fear, sadness, happiness, boredom, disgust and neutral. The approach is applied on a speech database, which consists of simulated and annotated utterances. First, numerical features are extracted from the sound database by using audio feature extractor. Next, the extracted features are standardized. Then, feature selection methods are used to select the most relevant features. Finally, a classification model is trained to recognize the emotions. Three classification algorithms are tested, with SVM yielding the highest accuracy of 89\% and 82\% using the 10 fold cross-validation and Leave-One-Speaker-Out techniques, respectively. ``Sadness'' is the emotion which is recognized with highest accuracy.},
}