	@article{Zhang2024_10013490,
	  title     = {A Robust Visual SLAM for Indoor Dynamic Environment},
	  author    = {Xiang Zhang and Daohong Yang and Ziyuan Wu and Lei Li and Wanting Zhou},
	  abstract  = {Visual Simultaneous Localization and Mapping (VSLAM) uses cameras to perceive unknown environments and to localize the sensor while simultaneously building a map. The technology is widely applied in autonomous driving, virtual reality, and related fields. Current VSLAM systems maintain high accuracy in static environments, but in dynamic environments moving objects reduce the stability of the system, leading to inaccurate localization and mapping or even system failure. In this paper, a robust VSLAM method is proposed to address the challenges of dynamic environments. We propose a dynamic-region removal scheme based on a semantic segmentation neural network and geometric constraints. First, the semantic segmentation network extracts the prior active-motion regions, prior static regions, and prior passive-motion regions of the environment. Then, a lightweight frame-tracking module initializes the relative pose between the previous frame and the current frame using only the prior static regions. A motion-consistency detection module based on multi-view geometry and scene flow then divides the environment into static and dynamic regions, so that dynamic object regions are eliminated. Finally, only the static regions are used by the tracking thread. Our method is built on ORB-SLAM3, one of the most effective VSLAM systems available. We evaluate the method on the TUM RGB-D benchmark, and the results demonstrate that it improves the accuracy of the original ORB-SLAM3 by 70% to 98.5% in highly dynamic environments.},
	  journal   = {International Journal of Cognitive and Language Sciences},
	  volume    = {18},
	  number    = {2},
	  year      = {2024},
	  pages     = {88 - 96},
	  ee        = {https://publications.waset.org/pdf/10013490},
	  url       = {https://publications.waset.org/vol/206},
	  bibsource = {https://publications.waset.org/},
	  issn      = {1307-6892},
	  publisher = {World Academy of Science, Engineering and Technology},
	  index     = {Open Science Index 206, 2024},
	}
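
The abstract above outlines a motion-consistency check built on multi-view geometry and scene flow. As a rough, illustrative sketch only, and not the authors' implementation, the Python snippet below shows one standard way the multi-view-geometry part of such a check can be realized: a fundamental matrix is estimated from matches in the prior static regions, and matches whose current-frame points lie far from their epipolar lines are flagged as dynamic. The function names, the pixel threshold, and the use of OpenCV are assumptions made for illustration; the scene-flow test and the semantic segmentation stage are not shown.

# Illustrative sketch (not the paper's code): epipolar motion-consistency check.
# Assumes matched keypoints between the previous and current frame are given as
# Nx2 float arrays, and that a separate set of matches lies in prior static regions.
import numpy as np
import cv2


def epipolar_residuals(pts_prev, pts_curr, F):
    """Distance of each current-frame point to the epipolar line induced by its
    previous-frame match. Large residuals suggest a moving (dynamic) point."""
    ones = np.ones((len(pts_prev), 1))
    p1 = np.hstack([pts_prev, ones])      # homogeneous previous-frame points
    p2 = np.hstack([pts_curr, ones])      # homogeneous current-frame points
    lines = (F @ p1.T).T                  # epipolar lines in the current frame
    num = np.abs(np.sum(lines * p2, axis=1))
    den = np.sqrt(lines[:, 0] ** 2 + lines[:, 1] ** 2) + 1e-12
    return num / den


def split_static_dynamic(pts_prev_static, pts_curr_static,
                         pts_prev_all, pts_curr_all, thresh_px=1.0):
    """Estimate F from prior-static matches only, then flag every match whose
    epipolar residual exceeds thresh_px pixels as dynamic."""
    F, _ = cv2.findFundamentalMat(pts_prev_static, pts_curr_static,
                                  cv2.FM_RANSAC, 1.0, 0.999)
    residuals = epipolar_residuals(pts_prev_all, pts_curr_all, F)
    dynamic_mask = residuals > thresh_px   # True where a match looks dynamic
    return dynamic_mask, residuals

In a full pipeline of the kind the abstract describes, the static-only matches returned by such a check would then be passed to the tracking thread, while features inside the flagged dynamic regions would be excluded from pose estimation and mapping.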