@article{4082,
  author   = {Luca Marzari and Gabriele Roncolato and Alessandro Farinelli},
  title    = {Formal Verification of DNN for Safety of Robotics Operation},
  journal  = {Digital Signal Processing and Artificial Intelligence for Automatic Learning},
  year     = {2024},
  volume   = {3},
  number   = {2},
  doi      = {10.6025/dspaial/2024/3/2/44-57},
  url      = {https://www.dline.info/dspai/fulltext/v3n2/dspaiv3n2_1.pdf},
  abstract = {Deep Neural Networks (DNNs) are formidable instruments that have demonstrated exceptional performance across various domains, from identifying patterns to tackling intricate challenges in robotics. However, their complex structures and opacity pose risks when they are used in practical settings. In this regard, Formal Verification (FV) of DNNs has become an essential approach to providing reliable guarantees regarding their safety properties. Yet, a simple yes-or-no answer (safe or unsafe) may not provide sufficient information for subsequent safety actions, such as ranking models by safety or choosing the best one. To overcome this, the FV problem has been broadened to a counting version, known as #DNN-Verification, aimed at determining the number of unsafe regions within the input domain of a specified safety property. However, the problem's complexity makes it difficult for current solutions to handle large and complex DNNs in real-world robotic environments. To tackle this issue, we draw inspiration from recent developments in FV and introduce a new approach that combines reachability analysis with Symbolic Linear Relaxation and parallel computing to improve the effectiveness of both exact and approximate FV for DNN counting tasks. Our experiments on established FV benchmarks and realistic robotic scenarios reveal a significant improvement in both scalability and efficiency, making it possible to apply these methods to complex robotic tasks.},
}