ding.example.sqil_continuous
Full Source Code: ../ding/example/sqil_continuous.py
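This example trains SQIL (Soft Q Imitation Learning) with a SAC backbone on the continuous-action Pendulum environment. A pretrained SAC policy acts as the expert: agent and expert transitions are collected into separate replay buffers, and the learner trains on an equally weighted mixture of the two.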
from ditk import logging
import torch
from ding.model import ContinuousQAC
from ding.policy import SQILSACPolicy
from ding.envs import BaseEnvManagerV2
from ding.data import DequeBuffer
from ding.config import compile_config
from ding.framework import task
from ding.framework.context import OnlineRLContext
from ding.framework.middleware import OffPolicyLearner, StepCollector, interaction_evaluator, \
    CkptSaver, sqil_data_pusher, termination_checker
from ding.utils import set_pkg_seed
from dizoo.classic_control.pendulum.envs.pendulum_env import PendulumEnv
from dizoo.classic_control.pendulum.config.pendulum_sac_config import main_config as ex_main_config
from dizoo.classic_control.pendulum.config.pendulum_sac_config import create_config as ex_create_config
from dizoo.classic_control.pendulum.config.pendulum_sqil_sac_config import main_config, create_config


def main():
    logging.getLogger().setLevel(logging.INFO)
    cfg = compile_config(main_config, create_cfg=create_config, auto=True)
    expert_cfg = compile_config(ex_main_config, create_cfg=ex_create_config, auto=True)
    # The expert config must use the same `n_sample`; the line below ensures
    # we do not need to modify the expert config by hand.
    expert_cfg.policy.collect.n_sample = cfg.policy.collect.n_sample
    with task.start(async_mode=False, ctx=OnlineRLContext()):
        collector_env = BaseEnvManagerV2(
            env_fn=[lambda: PendulumEnv(cfg.env) for _ in range(cfg.env.collector_env_num)], cfg=cfg.env.manager
        )
        expert_collector_env = BaseEnvManagerV2(
            env_fn=[lambda: PendulumEnv(cfg.env) for _ in range(cfg.env.collector_env_num)], cfg=cfg.env.manager
        )
        evaluator_env = BaseEnvManagerV2(
            env_fn=[lambda: PendulumEnv(cfg.env) for _ in range(cfg.env.evaluator_env_num)], cfg=cfg.env.manager
        )

        set_pkg_seed(cfg.seed, use_cuda=cfg.policy.cuda)

        model = ContinuousQAC(**cfg.policy.model)
        expert_model = ContinuousQAC(**cfg.policy.model)

        buffer_ = DequeBuffer(size=cfg.policy.other.replay_buffer.replay_buffer_size)
        expert_buffer = DequeBuffer(size=cfg.policy.other.replay_buffer.replay_buffer_size)

        policy = SQILSACPolicy(cfg.policy, model=model)
        expert_policy = SQILSACPolicy(expert_cfg.policy, model=expert_model)
        # Load the pretrained SAC checkpoint that serves as the expert.
        state_dict = torch.load(cfg.policy.collect.model_path, map_location='cpu')
        expert_policy.collect_mode.load_state_dict(state_dict)

        task.use(interaction_evaluator(cfg, policy.eval_mode, evaluator_env))
        task.use(
            StepCollector(cfg, policy.collect_mode, collector_env, random_collect_size=cfg.policy.random_collect_size)
        )  # agent data collector
        task.use(sqil_data_pusher(cfg, buffer_, expert=False))
        task.use(
            StepCollector(
                cfg,
                expert_policy.collect_mode,
                expert_collector_env,
                random_collect_size=cfg.policy.expert_random_collect_size
            )
        )  # expert data collector
        task.use(sqil_data_pusher(cfg, expert_buffer, expert=True))
        # Train on an equally weighted mixture of agent and expert transitions.
        task.use(OffPolicyLearner(cfg, policy.learn_mode, [(buffer_, 0.5), (expert_buffer, 0.5)]))
        task.use(CkptSaver(policy, cfg.exp_name, train_freq=100))
        task.use(termination_checker(max_train_iter=10000))
        task.run()


if __name__ == "__main__":
    main()
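The key SQIL ingredient is the reward relabeling performed when data is pushed into the buffers: expert transitions are stored with a constant reward of 1 and agent transitions with a constant reward of 0, so ordinary off-policy Q-learning is steered toward imitating the expert. Below is a minimal sketch of that idea, following the SQIL paper; relabel_sqil_reward is a hypothetical illustration, not DI-engine's actual sqil_data_pusher implementation.

import torch


def relabel_sqil_reward(transition: dict, expert: bool) -> dict:
    # SQIL's core trick (a sketch based on the SQIL paper, not DI-engine
    # internals): expert data carries a constant reward of 1, agent data a
    # constant reward of 0, so the SAC learner is driven toward expert-like
    # behavior by ordinary Q-learning.
    value = 1.0 if expert else 0.0
    transition['reward'] = torch.full_like(transition['reward'], value)
    return transition

In the pipeline above, sqil_data_pusher(cfg, buffer_, expert=False) handles agent data and sqil_data_pusher(cfg, expert_buffer, expert=True) handles expert data; OffPolicyLearner then samples both buffers with equal weight (0.5 each). Note that the script expects a pretrained SAC checkpoint reachable at cfg.policy.collect.model_path, which the expert policy loads before collection starts.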