@article{4119,
  author   = {Rodrigo Agerri and Eneko Agirre and Gorka Azkune and Roberto Centeno and Anselmo Peñas and German Rigau and Álvaro Rodrigo and Aitor Soroa},
  title    = {Pre-trained Language Models Based on Intricate Neural Networks},
  journal  = {International Journal of Computational Linguistics Research},
  year     = {2024},
  volume   = {15},
  number   = {3},
  doi      = {10.6025/ijclr/2024/15/3/105-112},
  url      = {https://www.dline.info/jcl/fulltext/v15n3/jclv15n3_2.pdf},
  abstract = {As language is the primary method for sharing information, Natural Language Processing (NLP) stands as a crucial technology in today’s digital revolution. Over the past few years, the field of NLP has played a significant role in developing advanced deep-learning methods and tools that transform how we tackle tasks related to Language Technology (LT). NLP has evolved from a traditional approach that relied on pipelines of interconnected modules to sophisticated models based on intricate neural networks trained on extensive text data. These recent developments have led to a significant shift in the approach to creating and utilizing large, pre-trained transformer-based language models. The outcomes have been so remarkable that systems are now achieving human-level performance in certain challenging language comprehension tasks. However, these large language models present significant challenges: there is a lack of understanding regarding their inner workings, their failure points, and how to enhance their capabilities further, so it is crucial to recognise their constraints. DeepKnowledge aims to explore the pre-training of large language models for the official languages of Spain, employing innovative methods to extract more detailed and applicable knowledge.},
}