@inproceedings{ff17b3c756614a27b53893d60ed2bfad,
title = "Reinforcement Learning for Quantization of Boundary Control Inputs: A Comparison of PPO-based Strategies",
abstract = "This paper investigates the boundary stabilization problem for the Korteweg-de Vries (KdV) system with quantized control inputs via a deep reinforcement learning (DRL) approach. To examine how the placement of the quantizer affects stabilization performance, we consider two scenarios: placing the quantizer in the environment and placing it in the agent. In the agent-side case, we further explore two variations: optimizing the parameters of a discretized continuous distribution and directly optimizing the parameters of a discrete distribution. Simulation results demonstrate that the proposed proximal policy optimization (PPO)-based strategies train DRL controllers that effectively stabilize the target system, and that the approach which directly learns the parameters of the discrete distribution achieves the highest stabilization efficiency among the quantization-based scenarios.",
keywords = "Boundary stabilization, Deep reinforcement learning, Input quantization, Nonlinear Korteweg-de Vries equation",
author = "Yibo Wang and Wen Kang",
note = "43rd Chinese Control Conference, CCC 2024; Conference date: 28-07-2024 through 31-07-2024",
year = "2024",
doi = "10.23919/CCC63176.2024.10661946",
language = "English",
series = "Chinese Control Conference, CCC",
publisher = "IEEE Computer Society",
pages = "1093--1098",
editor = "Jing Na and Jian Sun",
booktitle = "Proceedings of the 43rd Chinese Control Conference, CCC 2024",
address = "United States",
}
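
The abstract contrasts two agent-side quantization strategies. Below is a minimal, hypothetical PPO-style policy sketch in Python (PyTorch) of that contrast; the quantization grid, network sizes, observation dimension, and all class names are assumptions for illustration only, not the authors' implementation.

# Hypothetical sketch (not the paper's code): two policy heads for quantized
# boundary control, mirroring the two agent-side variations in the abstract.
# Assumed: the boundary input is quantized to a fixed grid of levels in [-1, 1].
import torch
import torch.nn as nn
from torch.distributions import Normal, Categorical

LEVELS = torch.linspace(-1.0, 1.0, steps=9)  # assumed quantization grid

class GaussianThenQuantize(nn.Module):
    """Variation 1: optimize a continuous (Gaussian) policy, then snap the
    sampled action to the nearest quantization level."""
    def __init__(self, obs_dim: int):
        super().__init__()
        self.body = nn.Sequential(nn.Linear(obs_dim, 64), nn.Tanh())
        self.mu = nn.Linear(64, 1)
        self.log_std = nn.Parameter(torch.zeros(1))

    def forward(self, obs: torch.Tensor):
        h = self.body(obs)
        dist = Normal(self.mu(h), self.log_std.exp())
        a = dist.sample()                              # continuous sample
        idx = (a - LEVELS).abs().argmin(dim=-1)        # nearest-level rounding
        return LEVELS[idx], dist.log_prob(a).sum(-1)

class DirectCategorical(nn.Module):
    """Variation 2: directly optimize a discrete distribution over the
    quantization levels, so the PPO objective sees the levels themselves."""
    def __init__(self, obs_dim: int):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(obs_dim, 64), nn.Tanh(),
                                 nn.Linear(64, LEVELS.numel()))

    def forward(self, obs: torch.Tensor):
        dist = Categorical(logits=self.net(obs))
        idx = dist.sample()                            # level index is the action
        return LEVELS[idx], dist.log_prob(idx)

if __name__ == "__main__":
    obs = torch.randn(4, 8)  # assumed 8-dimensional observation of the KdV state
    for policy in (GaussianThenQuantize(8), DirectCategorical(8)):
        action, logp = policy(obs)
        print(type(policy).__name__, action.shape, logp.shape)

The categorical head corresponds to "directly optimizing the parameters of the discrete distribution": the sampled index is itself a quantization level, so no non-differentiable rounding step sits between the learned policy and the applied boundary input.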