ding.example.dqn_rnd
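This example trains a DQN policy on CartPole-v0 with a Random Network Distillation (RND) reward model, which adds an intrinsic exploration bonus to the environment reward. The training loop is assembled from DI-engine's middleware components (evaluator, epsilon-greedy handler, step collector, reward-model trainer, data pusher, off-policy learner, and checkpoint saver) inside a single task. The file can be launched directly as a script, for example: python3 -u ding/example/dqn_rnd.py.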
Full Source Code: ../ding/example/dqn_rnd.py
import gym
from ditk import logging
from ding.model import DQN
from ding.policy import DQNPolicy
from ding.reward_model import RndRewardModel
from ding.envs import DingEnvWrapper, BaseEnvManagerV2
from ding.data import DequeBuffer
from ding.config import compile_config
from ding.framework import task
from ding.framework.context import OnlineRLContext
from ding.framework.middleware import OffPolicyLearner, StepCollector, interaction_evaluator, data_pusher, trainer, \
    eps_greedy_handler, CkptSaver
from ding.utils import set_pkg_seed
from dizoo.classic_control.cartpole.config.cartpole_dqn_rnd_config import main_config, create_config


def main():
    logging.getLogger().setLevel(logging.INFO)
    cfg = compile_config(main_config, create_cfg=create_config, auto=True)
    with task.start(async_mode=False, ctx=OnlineRLContext()):
        # Vectorized environment managers for data collection and evaluation.
        collector_env = BaseEnvManagerV2(
            env_fn=[lambda: DingEnvWrapper(gym.make("CartPole-v0")) for _ in range(cfg.env.collector_env_num)],
            cfg=cfg.env.manager
        )
        evaluator_env = BaseEnvManagerV2(
            env_fn=[lambda: DingEnvWrapper(gym.make("CartPole-v0")) for _ in range(cfg.env.evaluator_env_num)],
            cfg=cfg.env.manager
        )

        set_pkg_seed(cfg.seed, use_cuda=cfg.policy.cuda)

        model = DQN(**cfg.policy.model)
        buffer_ = DequeBuffer(size=cfg.policy.other.replay_buffer.replay_buffer_size)
        policy = DQNPolicy(cfg.policy, model=model)
        # RND reward model: supplies intrinsic (exploration) rewards on top of
        # the environment reward.
        reward_model = RndRewardModel(cfg.reward_model)

        # Middleware pipeline, executed in order on each iteration: evaluate the
        # policy, anneal epsilon, collect steps, train the reward model, push the
        # collected data into the buffer, update the policy, save checkpoints.
        task.use(interaction_evaluator(cfg, policy.eval_mode, evaluator_env))
        task.use(eps_greedy_handler(cfg))
        task.use(StepCollector(cfg, policy.collect_mode, collector_env))
        task.use(trainer(cfg, reward_model))
        task.use(data_pusher(cfg, buffer_))
        task.use(OffPolicyLearner(cfg, policy.learn_mode, buffer_, reward_model=reward_model))
        task.use(CkptSaver(policy, cfg.exp_name, train_freq=100))
        task.run()


if __name__ == "__main__":
    main()
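For background on what the reward model computes: RND measures novelty as the prediction error between a frozen, randomly initialized target network and a predictor network trained to imitate it on visited observations. The sketch below illustrates that mechanism in plain PyTorch; the names RndSketch, intrinsic_reward, and update are made up for illustration and do not reflect the actual RndRewardModel interface.

# Minimal RND sketch (illustrative only; not DI-engine's RndRewardModel API).
# Intrinsic reward = prediction error between a frozen random "target" network
# and a "predictor" network trained to imitate it on visited observations.
import torch
import torch.nn as nn


class RndSketch(nn.Module):

    def __init__(self, obs_dim: int, embed_dim: int = 64):
        super().__init__()
        self.target = nn.Sequential(nn.Linear(obs_dim, 64), nn.ReLU(), nn.Linear(64, embed_dim))
        self.predictor = nn.Sequential(nn.Linear(obs_dim, 64), nn.ReLU(), nn.Linear(64, embed_dim))
        # The target network stays fixed at its random initialization.
        for p in self.target.parameters():
            p.requires_grad = False

    def intrinsic_reward(self, obs: torch.Tensor) -> torch.Tensor:
        # Rarely seen observations are poorly predicted, so their error
        # (and hence their intrinsic reward) is high.
        with torch.no_grad():
            return ((self.predictor(obs) - self.target(obs)) ** 2).mean(dim=1)

    def update(self, obs: torch.Tensor, optimizer: torch.optim.Optimizer) -> float:
        # Train the predictor to match the frozen target on collected observations.
        loss = ((self.predictor(obs) - self.target(obs)) ** 2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        return loss.item()


# Example usage (hypothetical shapes for CartPole's 4-dimensional observation):
# rnd = RndSketch(obs_dim=4)
# opt = torch.optim.Adam(rnd.predictor.parameters(), lr=1e-3)
# obs = torch.randn(32, 4)
# r_intrinsic = rnd.intrinsic_reward(obs)  # shape (32,)
# rnd.update(obs, opt)

In the pipeline above, this is roughly the division of labor: the reward model is trained on freshly collected data, while OffPolicyLearner consumes it (via the reward_model argument) so that sampled transitions can be augmented with intrinsic rewards before the DQN update.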