ding.example.sqil

Full Source Code

../ding/example/sqil.py

import gym
from ditk import logging
import torch
from ding.model import DQN
from ding.policy import SQLPolicy
from ding.envs import DingEnvWrapper, BaseEnvManagerV2
from ding.data import DequeBuffer
from ding.config import compile_config
from ding.framework import task
from ding.framework.context import OnlineRLContext
from ding.framework.middleware import OffPolicyLearner, StepCollector, interaction_evaluator, \
    eps_greedy_handler, CkptSaver, eps_greedy_masker, sqil_data_pusher
from ding.utils import set_pkg_seed
from dizoo.classic_control.cartpole.config.cartpole_sql_config import main_config as ex_main_config
from dizoo.classic_control.cartpole.config.cartpole_sql_config import create_config as ex_create_config
from dizoo.classic_control.cartpole.config.cartpole_sqil_config import main_config, create_config


def main():
    logging.getLogger().setLevel(logging.INFO)
    cfg = compile_config(main_config, create_cfg=create_config, auto=True)
    expert_cfg = compile_config(ex_main_config, create_cfg=ex_create_config, auto=True)
    # The expert config must use the same `n_sample`. The line below ensures we do not
    # need to modify the expert config by hand.
    expert_cfg.policy.collect.n_sample = cfg.policy.collect.n_sample
    with task.start(async_mode=False, ctx=OnlineRLContext()):
        collector_env = BaseEnvManagerV2(
            env_fn=[lambda: DingEnvWrapper(gym.make("CartPole-v0")) for _ in range(cfg.env.collector_env_num)],
            cfg=cfg.env.manager
        )
        expert_collector_env = BaseEnvManagerV2(
            env_fn=[lambda: DingEnvWrapper(gym.make("CartPole-v0")) for _ in range(cfg.env.collector_env_num)],
            cfg=cfg.env.manager
        )
        evaluator_env = BaseEnvManagerV2(
            env_fn=[lambda: DingEnvWrapper(gym.make("CartPole-v0")) for _ in range(cfg.env.evaluator_env_num)],
            cfg=cfg.env.manager
        )

        set_pkg_seed(cfg.seed, use_cuda=cfg.policy.cuda)

        # Separate networks and buffers for the learning agent and the frozen expert.
        model = DQN(**cfg.policy.model)
        expert_model = DQN(**cfg.policy.model)

        buffer_ = DequeBuffer(size=cfg.policy.other.replay_buffer.replay_buffer_size)
        expert_buffer = DequeBuffer(size=cfg.policy.other.replay_buffer.replay_buffer_size)

        policy = SQLPolicy(cfg.policy, model=model)
        expert_policy = SQLPolicy(expert_cfg.policy, model=expert_model)
        # Load pretrained expert weights so the expert collector produces demonstration data.
        state_dict = torch.load(cfg.policy.collect.model_path, map_location='cpu')
        expert_policy.collect_mode.load_state_dict(state_dict)

        task.use(interaction_evaluator(cfg, policy.eval_mode, evaluator_env))
        task.use(eps_greedy_handler(cfg))
        task.use(StepCollector(cfg, policy.collect_mode, collector_env))  # agent data collector
        task.use(sqil_data_pusher(cfg, buffer_, expert=False))
        task.use(eps_greedy_masker())  # the expert collects greedily, without epsilon exploration
        task.use(StepCollector(cfg, expert_policy.collect_mode, expert_collector_env))  # expert data collector
        task.use(sqil_data_pusher(cfg, expert_buffer, expert=True))
        # Each training batch is drawn half from agent data and half from expert data.
        task.use(OffPolicyLearner(cfg, policy.learn_mode, [(buffer_, 0.5), (expert_buffer, 0.5)]))
        task.use(CkptSaver(policy, cfg.exp_name, train_freq=100))
        task.run()


if __name__ == "__main__":
    main()
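Note that running this script requires a pretrained SQL checkpoint at `cfg.policy.collect.model_path` (set in `cartpole_sqil_config`), since the expert policy's weights are loaded from that file.

For context, SQIL reduces imitation learning to off-policy RL by relabeling rewards: expert transitions receive a constant reward of 1 and the agent's own transitions receive 0, after which ordinary soft Q-learning runs on the mixed data. The sketch below illustrates that relabeling step in isolation. It is a simplified stand-in for intuition only, not the actual `sqil_data_pusher` middleware; the `Transition` container and `relabel_for_sqil` helper are hypothetical names invented here.

from dataclasses import dataclass, replace
from typing import List

import torch


@dataclass
class Transition:
    # Hypothetical minimal transition container; DI-engine uses richer dict-like samples.
    obs: torch.Tensor
    action: torch.Tensor
    reward: torch.Tensor
    next_obs: torch.Tensor
    done: bool


def relabel_for_sqil(transitions: List[Transition], expert: bool) -> List[Transition]:
    # SQIL's reward relabeling: constant 1 for expert data, constant 0 for agent data.
    constant = 1.0 if expert else 0.0
    return [replace(t, reward=torch.full_like(t.reward, constant)) for t in transitions]

Because `OffPolicyLearner` in the script above samples from `(buffer_, 0.5)` and `(expert_buffer, 0.5)`, each training batch mixes agent and expert transitions evenly, matching SQIL's balanced-sampling scheme.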